diff --git a/_includes/figure.html b/_includes/figure.html new file mode 100644 index 00000000..eb3dc28f --- /dev/null +++ b/_includes/figure.html @@ -0,0 +1,35 @@ +{%- assign img_path = include.path | remove: ".jpg" | remove: ".jpeg" | remove: ".png" | remove: ".tiff" -%} + +
+ + + {% if site.imagemagick.enabled %} + {% for i in site.imagemagick.widths -%} + + {% endfor -%} + {% endif %} + + + + + + {%- if include.caption -%}
{{ include.caption }}
{%- endif %} + +
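Note: a minimal usage sketch of this new include, as it would be called from a post or page (the image path and caption below are illustrative placeholders, not files added in this change):

    {% include figure.html path="assets/img/example.png" class="img-fluid rounded z-depth-0" zoomable=true caption="Example caption text rendered under the image." %}

When site.imagemagick.enabled is set in the site config, the loop over site.imagemagick.widths above appears to emit one responsive source entry per configured width, built from the extension-stripped img_path; the caption parameter is optional and is only rendered when passed.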
diff --git a/_includes/scripts/analytics.html b/_includes/scripts/analytics.html new file mode 100644 index 00000000..4a345d3e --- /dev/null +++ b/_includes/scripts/analytics.html @@ -0,0 +1,18 @@ +{%- if site.enable_google_analytics -%} + + + +{%- endif -%} +{%- if site.enable_cronitor_analytics -%} + + + +{%- endif -%} \ No newline at end of file diff --git a/_includes/scripts/progressBar.html b/_includes/scripts/progressBar.html new file mode 100644 index 00000000..1a3aa450 --- /dev/null +++ b/_includes/scripts/progressBar.html @@ -0,0 +1,80 @@ +{% if site.enable_progressbar %} + + + + +{%- endif %} \ No newline at end of file diff --git a/_layouts/distill.html b/_layouts/distill.html index f9459f1d..bc323ce8 100644 --- a/_layouts/distill.html +++ b/_layouts/distill.html @@ -1,15 +1,21 @@ + - {% include head.html %} + {%- include head.html %} + + {% include scripts/jquery.html %} {% include scripts/mathjax.html %} + + {% if page._styles %} + - {% endif %} + {%- endif %} @@ -18,7 +24,7 @@ "description": "{{ page.description }}", "published": "{{ page.date | date: '%B %-d, %Y' }}", "authors": [ - {% for author in page.authors %} + {% for author in page.authors -%} { "author": "{{ author.name }}", "authorURL": "{{ author.url }}", @@ -48,14 +54,12 @@ } - + - - {% include header.html %} + {%- include header.html %} -
@@ -66,6 +70,24 @@

{{ page.title }}

+ {% if page.toc -%} + + + + {%- endif %} + {{ content }} @@ -74,15 +96,22 @@

{{ page.title }}

+ + + {%- if site.disqus_shortname and page.disqus_comments -%} + {% include disqus.html %} + {%- endif %} + {%- if site.giscus.repo and page.giscus_comments -%} + {% include giscus.html %} + {%- endif -%} +
+ {%- include footer.html %} - {% include footer.html %} - + {% include scripts/bootstrap.html %} + {% include scripts/analytics.html %} + {% include scripts/progressBar.html %} - - - - diff --git a/_pages/members.md b/_pages/members.md index 88fd9754..b21acf0e 100644 --- a/_pages/members.md +++ b/_pages/members.md @@ -206,6 +206,17 @@ order: 1
+
+
+ + +
+
+ Samuele Cornell +
+
+
+

diff --git a/_pages/sphinx-lunch.md b/_pages/sphinx-lunch.md index 369665b1..566dfbd4 100644 --- a/_pages/sphinx-lunch.md +++ b/_pages/sphinx-lunch.md @@ -25,15 +25,34 @@ A tentative schedule can be found [here](https://docs.google.com/spreadsheets/d/ ## Future Talks (tentative schedule) -- October 5, 2023 +- November 9, 2023 + - Title: Universal Speech Enhancement: What Can We Do With Real Data? + - Speaker: Wangyou Zhang + - Abstract: Speech enhancement (SE) methods based on deep learning have shown impressive performance on many simulation conditions (TIMIT/WSJ/Librispeech/...+Noise), whereas the generalization to a wider range of real conditions has not been addressed. In fact, many high-performing SE methods tend to overfit the simulation condition in training, whose inductive bias may be easily violated in real conditions. In the era of large-scale pre-training, it is natural to ask whether we can make use of large-scale real recording data to train a truly universal SE model that can be used for all speech-as-input tasks in real-world conditions. In this talk, I try to answer the following two questions by summarizing existing works in these directions: 1) what can we do to utilize real data for SE training? 2) what models can be used to achieve universal SE? Finally, I will finish the talk by proposing new problems in related topics. + +- November 16, 2023 - Title: TBD - - Speaker: Grant Strimel (Amazon) + - Speaker: Zhong-Qiu Wang + - Abstract: TBD + +## Previous Talks + +- November 2, 2023 + - Title: Music generation with precise control + - Speakers: Chris Donahue and Shih-Lun Wu + - Abstract: In the first half of the session, Chris will discuss some recent work on generating music with precise control and composable outputs. Music audio generation has seen an explosion of activity - we now have the ability to generate music in broad styles with natural language control. However, despite the impressive breadth of these models, they have not yet had a salient impact on music in the real world. Instead, music AI models with more narrow capabilities have had disproportionate impact (e.g. source separation, voice cloning). In this talk, Chris will argue that current narrow models are more appealing to creators because they offer more creative potential for two reasons: (i) they offer precise and familiar forms of control, and (ii) their outputs are composable and integrate with conventional workflows. Chris will discuss two of his recent papers, SingSong (Donahue+ 23) and the Anticipatory Music Transformer (Thickstun+ 23), which seek to bring more creative potential to broadly-capable music generative models. In the second half of the session, Shih-Lun will introduce his recent work, Music ControlNet (Wu+ 23, unpublished), which imbues diffusion-based text-to-music generation models with precise melody, dynamics, and rhythm controls. Music ControlNet builds upon the ControlNet line of research in image generation, and adapts their framework to accept time-varying controls in the audio domain. Shih-Lun will demonstrate that Music ControlNet can respond precisely to any composition of the controls it has been trained on, and can also generalize to out-of-distribution control signals that creators may realistically provide.
- October 12, 2023 - Title: Computational Audition through Imprecise Labels - Speaker: Ankit Shah + - Abstract: In this talk, we delve into computational auditory processing to mimic how humans and animals interpret sounds to interact with their surroundings effectively. The journey begins with the machine's challenge of recognizing a vast array of sounds while being limited to the known sounds in our datasets. This limitation becomes glaring as current models require large labeled datasets for accuracy, which often isn't feasible in real-world settings due to data scarcity. We then spotlight a core issue: the strength of sound labels within available datasets. The quandary is that even with a fraction of known sounds and limited data, inaccuracies in sound labeling lead to suboptimal models. Our focus shifts to devising strategies for sound modeling amidst inaccurate, weak, or incomplete labels, termed working with imprecisely labeled data. Our exploration includes enhancing the existing annotations, understanding the effects of label noise and corruption, and innovating a co-training approach for learning sound events from web data without human intervention. We venture into exploiting additional cues like event counts and durations with negligible extra effort, introducing the concept of semi-weak labels. Lastly, the talk describes a unified framework encapsulating all our approaches, making a robust model capable of handling various labeling scenarios and paving a solid foundation for future endeavors in understanding and modeling the world of images (transferable to sounds), irrespective of label availability. Through this, we aspire to bridge the gap between the human brain's natural sound-processing ability and machines, opening doors to a more harmonious interaction with the acoustic world around us. + - Bio: Ankit Shah is a Ph.D. student in the Language Technologies Institute in the School of Computer Science at Carnegie Mellon University. Ankit earned his master's in Language Technologies at Carnegie Mellon University in 2019 and his bachelor's in electronics and communication engineering from the National Institute of Technology Karnataka Surathkal. He has worked in the industry for over 4 years as a verification engineer and project lead at ARM and as a deep learning research scientist at ReviveMed before joining the Ph.D. program. His areas of interest are audio understanding, machine learning, and deep learning. His thesis focuses on learning in the presence of weak, uncertain, and incomplete labels, where he has made several key contributions, including setting up DCASE challenges on the topic. He has won the Gandhian Young Technological Innovator (GYTI) award in India for his contribution to building a never-ending learner of sound systems. His team recently emerged as a winning team in the NYC AI Hackathon challenge on LLMs (large language models) and generative AI. He enjoys reading several books during the year, listening to music, and traveling. Further, he is keenly interested in economics, startups, entrepreneurship, etc. Website: https://ankitshah009.github.io -## Previous Talks +- October 5, 2023 + - Title: Adaptive Non-Causality for Speech Recognition + - Speaker: Grant Strimel (Amazon) + - Abstract: Streaming speech recognition architectures are employed for low-latency, real-time applications. Such architectures are often characterized by their causality – how much forward context is consumed before making a prediction on an individual frame.
In this talk, we will review prior approaches to balancing the competing objectives of low latency and the accuracy benefit derived from "look ahead" information. We will then discuss an approach we proposed called the Adaptive Non-Causal Attention Transducer (ANCAT). The architecture is non-causal in the traditional sense, but executes in a low-latency, streaming manner by dynamically choosing when to rely on future context and to what degree within the audio stream. The resulting mechanism, when coupled with novel regularization algorithms (which we will dive into), delivers comparable accuracy to non-causal configurations while improving significantly upon latency, closing the gap with their fully-causal model counterparts. + - Bio: Grant Strimel is a Principal Scientist at Amazon AGI and part of the Alexa Speech Recognition and Deep Learning groups. He joined Alexa Pittsburgh in 2018, where the organization has now grown to over fifty scientists and engineers working on natural language processing experiences through both edge-first and cloud-centric solutions. His primary focus for Amazon has been on low-latency, real-time ML design for speech applications. - September 28, 2023 - Title: Towards robust speech generation diff --git a/_plugins/details.rb b/_plugins/details.rb new file mode 100644 index 00000000..fa12bf26 --- /dev/null +++ b/_plugins/details.rb @@ -0,0 +1,24 @@ +# Code from http://movb.de/jekyll-details-support.html + +module Jekyll + module Tags + class DetailsTag < Liquid::Block + + def initialize(tag_name, markup, tokens) + super + @caption = markup + end + + def render(context) + site = context.registers[:site] + converter = site.find_converter_instance(::Jekyll::Converters::Markdown) + caption = converter.convert(@caption).gsub(/<\/?p[^>]*>/, '').chomp + body = converter.convert(super(context)) + "
<details><summary>#{caption}</summary>#{body}</details>
" + end + + end + end + end + + Liquid::Template.register_tag('details', Jekyll::Tags::DetailsTag) \ No newline at end of file diff --git a/_posts/2022-01-16-11692-2023s.md b/_posts/2022-01-16-11692-2023s.md index a82d8e26..54c5d5a1 100644 --- a/_posts/2022-01-16-11692-2023s.md +++ b/_posts/2022-01-16-11692-2023s.md @@ -33,7 +33,7 @@ Description here - We will use [gradescope](https://www.gradescope.com/courses/496788) -### Syllable +### Syllabus - This is a tentative schedule. - The slides will be uploaded right before the lecture (in piazza). - The vidoes will be uploaded irregulaly after the lecture due to the edit process (in piazza). diff --git a/_posts/2022-08-29-11751-2022f.md b/_posts/2022-08-29-11751-2022f.md index abe337b9..236ee7da 100644 --- a/_posts/2022-08-29-11751-2022f.md +++ b/_posts/2022-08-29-11751-2022f.md @@ -34,7 +34,7 @@ Description here - We will use [gradescope](https://www.gradescope.com/courses/412024) -### Syllable +### Syllabus - This is a tentative schedule. - The slides will be uploaded right before the lecture. - The vidoes will be uploaded irregulaly after the lecture due to the edit process. diff --git a/_posts/2023-08-27-11751-2023f.md b/_posts/2023-08-27-11751-2023f.md index e5671606..c453d62f 100644 --- a/_posts/2023-08-27-11751-2023f.md +++ b/_posts/2023-08-27-11751-2023f.md @@ -34,7 +34,7 @@ Description here - We will use [gradescope](https://www.gradescope.com/courses/564396) -### Syllable +### Syllabus - This is a tentative schedule. - The slides will be uploaded right before the lecture. - The vidoes will be uploaded irregulaly after the lecture due to the edit process. diff --git a/_posts/2023-10-03-foundations.md b/_posts/2023-10-03-foundations.md new file mode 100644 index 00000000..cf75dd8d --- /dev/null +++ b/_posts/2023-10-03-foundations.md @@ -0,0 +1,158 @@ +--- +layout: distill +title: Foundations for Speech Foundation Models +description: A summary of our recent work at WAVLab towards building large-scale speech foundation models +date: 2023-10-03 +giscus_comments: true + +authors: + - name: William Chen + url: "https://wanchichen.github.io/" + affiliations: + name: Carnegie Mellon University + - name: Shinji Watanabe + url: "https://sites.google.com/view/shinjiwatanabe" + affiliations: + name: Carnegie Mellon University + +bibliography: 2023-09-24-foundations.bib + +toc: + - name: "YODAS: 420k Hours of Annotated Multilingual Data" + - name: "OWSM: Understanding Large-scale Weak Supervision" + - name: "WavLabLM: Multilingual Self-Supervised Speech Representations" + - name: "The ML-SUPERB Challenge: Community-Driven Benchmarking for over 150 Languages" + # if a section has subsections, you can add them as follows: + # subsections: + # - name: Example Child Subsection 1 + # - name: Example Child Subsection 2 + +# Below is an example of injecting additional post-specific styles. +# If you use this post as a template, delete this _styles block. +_styles: > + .fake-img { + background: #bbb; + border: 1px solid rgba(0, 0, 0, 0.1); + box-shadow: 0 0px 4px rgba(0, 0, 0, 0.1); + margin-bottom: 12px; + } + .fake-img p { + font-family: monospace; + color: white; + text-align: left; + margin: 12px 0; + text-align: center; + font-size: 16px; + } + +--- + +## Introduction +The explosion in generative AI has taken the world by storm: powerful pretrained models like GPT-4 and Stable Diffusion have already entered the mainstream media and consumer pockets. 
While the trend towards large-scale models is no different in speech, a consensus has yet to emerge on which techniques will drive the speech foundation models of tomorrow. To help enable this progress, we are very excited to share the techniques and resources we have been developing at WAVLab, many of which will be publicly released for both academic and commercial use in the coming weeks. This blog post will cover four works in particular: + +- YODAS: An open-source multilingual dataset with over 420k hours of annotated data +- OWSM: A transparent reproduction of OpenAI's Whisper from scratch +- WavLabLM: Joint denoising for cross-lingual speech representation learning +- ML-SUPERB Challenge: A community-driven speech benchmark for 154 languages + +If you're reading this in 2023, these works will be presented at [ASRU](http://www.asru2023.org/). Come visit our presentations in Taipei if you are interested in more details! Be sure to check out the Colab demos for OWSM and WavLabLM linked below too. + +## YODAS: 420k Hours of Annotated Multilingual Data +{% details Authors %} +*Xinjian Li, Shinnosuke Takamichi, Takaaki Saeki, William Chen, Sayaka Shiota, Shinji Watanabe* +{% enddetails %} +  + +*Paper and Data coming soon* + +Unlike text-driven Large Language Models, many spoken language tasks are inherently multi-modal: we often interact with these speech models through text, either as an input or output. This makes paired speech-text data a necessity, but it is much more difficult to acquire compared to unpaired speech or unpaired text. Companies like Google and Meta are able to train large-scale speech foundation models through their access to considerable amounts of internal paired data that remain unreleased, often due to privacy or copyright restrictions. **How can researchers train more powerful models using the newest techniques, without access to sufficient amounts of data?** + +Our answer is YODAS, a Youtube-Oriented Dataset for Audio and Speech that consists of **over 500k hours of speech data across 140 languages, with 420k hours of the data having paired textual transcripts**. To create YODAS, we extensively crawled Youtube for about half a year, collecting both audio data and the provided transcriptions. These transcriptions, however, are not synced with the speech. We need to first align each sentence in the transcript to timestamps in the audio, after which we can segment the audio into smaller clips. Without this step, the audio would be too long to fit into GPU memory for model training. + +To perform the segmentation, we used a pre-trained acoustic model to align the speech and text. Along with the speech-text alignments, the model also gives us a score based on the CTC loss. We can thus use this as a metric to determine the quality of the aligned speech/text and filter out poor-quality samples. Per-language and per-writing-system breakdowns of the filtered dataset are shown below: + +{% include figure.html path="assets/img/blog/yodas_langs.png" class="img-fluid rounded z-depth-0" zoomable=true %} +
+Total duration (measured in hours) of the dataset for the top 25 languages. Manual subtitles (blue) are uploaded by the user, while automatic subtitles are generated by Youtube (orange). +
+
+{% include figure.html path="assets/img/blog/yodas_chars.png" class="img-fluid rounded z-depth-0" zoomable=true %} +
+Number of occurrences of each character type in the YODAS transcripts, on a log-scale. +
+
**Most importantly, we only crawled videos released with a Creative Commons 3.0 License, meaning all of our data can be made open-source and even used commercially!** We plan to release the data on HuggingFace Datasets in the coming months, so stay tuned! If you're interested in more details about our crawling method or data distribution, the paper will also be released on arXiv soon. + +## OWSM: Understanding Large-scale Weak Supervision + +{% details Authors %} +*Yifan Peng, Jinchuan Tian, Brian Yan, Dan Berrebbi, Xuankai Chang, Xinjian Li, Jiatong Shi, Siddhant Arora, William Chen, Roshan Sharma, Wangyou Zhang, Yui Sudo, Muhammad Shakeel, Jee-weon Jung, Soumi Maiti, Shinji Watanabe* +{% enddetails %} +  + + +[Model](https://huggingface.co/pyf98/owsm_v3) | [Paper](https://arxiv.org/abs/2309.13876) | [Code](https://github.com/espnet/espnet/tree/master/egs2/mixed_v3/s2t1) | [Demo](https://colab.research.google.com/drive/1tJpY0GTWBQvoXPtiznJ78jb9Bt4xWOMe?usp=sharing) + +While the attention of speech researchers has been mostly occupied by self-supervised BERT-style models in the past several years, the introduction of Whisper has significantly strengthened the case for semi-supervised / weakly-supervised models. Whisper is trained on an extremely large-scale collection of paired speech/text data, sacrificing data quality for quantity. This leads to very impressive zero-shot performance on new domains and tasks, such as unseen speech translation pairs and code-switched ASR. + +But using such large-scale proprietary models for research is risky. As the scale of AI models grows, the chance of data corruption only gets higher. **How can researchers understand the capabilities of these models without knowing the data they are trained on?** Our goal is to produce a model with the capabilities of Whisper, but with full transparency on the training data. We are excited to share our first steps towards this direction: OWSM (Open Whisper-style Speech Model, pronounced "Awesome!"). + +Similar to Whisper, OWSM is a Transformer encoder-decoder trained on 30-second segments of paired speech/text data. The model is trained to perform multiple tasks, such as ASR, language identification, speech translation, and timestamp prediction. However, there are also a few key differences. OWSM downsamples the input by a factor of 4 instead of 2, for better training efficiency. We also employ an auxiliary CTC loss, which stabilizes training. It allows OWSM to perform joint CTC/attention decoding, which helps prevent repeated tokens and makes inference parameters easier to tune. Finally, OWSM supports any-to-any speech translation, while Whisper can only perform any-to-English. + +{% include figure.html path="assets/img/blog/owsm_pipeline.png" class="img-fluid rounded z-depth-0" zoomable=true %} +
+Training pipeline of OWSM. +
+
OWSM is trained exclusively on publicly accessible datasets, totaling over 180k hours of speech, roughly a quarter of Whisper's 680k. **This makes OWSM by far the largest speech model trained by an academic group, and it rivals many proposed by industrial research labs.** Training the final version of OWSM took 10 days on 64 A100 40GB GPUs, or about 15,400 GPU hours. Counting our ablations and scaling experiments, we consumed around 36,000 total GPU hours, or about half of our computational budget for the whole year! **We will be working to scale OWSM to 1 million hours of data. So if you want to collaborate on or sponsor the next generation of spoken language models, don't hesitate to reach out!** + + +## WavLabLM: Multilingual Self-Supervised Speech Representations +{% details Authors %} +*William Chen, Jiatong Shi, Brian Yan, Dan Berrebbi, Wangyou Zhang, Yifan Peng, Xuankai Chang, Soumi Maiti, Shinji Watanabe* +{% enddetails %} +  + +[Model](https://huggingface.co/espnet/WavLabLM-MS-40k) | [Paper](https://arxiv.org/abs/2309.15317) | *Code coming soon* | [Demo](https://colab.research.google.com/drive/1xfWfWe2cOwq2R0bPATAgmTu2akAFoab_?usp=sharing) + +Supervised models like OWSM and Whisper have impressive few-shot or zero-shot capabilities, but they still rely upon paired speech/text data, which will always be more expensive to obtain than unlabeled speech. Thus, from a practical standpoint, pure self-supervised learning is still necessary to extend speech technologies to more universal applications, such as speech processing for more languages. Encoders such as WavLM and HuBERT learn powerful speech representations using only unlabeled data, allowing them to achieve strong results with only small amounts of fine-tuning. However, most of these state-of-the-art models are pre-trained only on English, which is sub-optimal for training models for low-resource languages due to the linguistic gap. + +Of course, there has been a plethora of existing work on multilingual self-supervised speech models. XLS-R 53, XLSR-128, and MMS are all open-source self-supervised speech encoders trained on large amounts of unlabeled multilingual speech. But they all use the older wav2vec 2.0 pre-training objective, which has been shown to be outperformed by masked prediction models like WavLM and HuBERT. In fact, stronger multilingual speech encoders that use this type of pre-training exist, but they remain unreleased to the public. **To address this, we released WavLabLM, a self-supervised speech encoder trained on 40k hours of data across 136 languages. WavLabLM extends WavLM's state-of-the-art joint denoising and prediction approach to multilingual speech, allowing it to achieve comparable performance to the wav2vec 2.0-based models with much less pre-training data.** + +WavLabLM is built on the discrete masked-prediction technique proposed by HuBERT. We first extract self-supervised representations from the unlabeled speech using a HuBERT model, which are then quantized into discrete units via k-means clustering. Random portions of the input speech are masked and fed into WavLabLM, which must predict the corresponding discrete units of the masked speech using the information in the unmasked speech. Furthermore, the input speech is augmented by random distractors the model must avoid. In every training step, we randomly sample another utterance or some random noise to mix into the actual input.
This is the denoising portion of the pre-training approach, allowing the model to become more robust to noise and not overfit to clean single-speaker speech. + +{% include figure.html path="assets/img/blog/wavlablm.png" class="img-fluid rounded z-depth-0" zoomable=true %} +
+WavLabLM's multi-stage pipeline. +
+
During this process, we found that multilingual pre-training introduces new complications. One of the most important is the language imbalance of the data. Low-resource languages, which make up a smaller portion of the dataset, are seen less often during pre-training. This leads to degraded downstream performance and dampens the cross-lingual transfer learning capabilities of the model. A popular approach is to upsample low-resource languages, but that may require significant amounts of tuning, and thus compute we do not have. **We instead propose a simple multi-stage approach. We first pre-train WavLabLM on the full unbalanced dataset, and then further pre-train it for only a few steps on a balanced subset. This proved to be important in improving performance on the ML-SUPERB Benchmark, particularly in tasks involving languages other than those from West Europe or East Asia.** + +## The ML-SUPERB Challenge: Community-Driven Benchmarking for over 150 Languages +{% details Authors %} +*Jiatong Shi, William Chen, Dan Berrebbi, Hsiu-Hsuan Wang, Wei-Ping Huang, En-Pei Hu, Ho-Lam Chuang, Xuankai Chang, Yuxun Tang, Shang-Wen Li, Abdelrahman Mohamed, Hung-yi Lee, Shinji Watanabe* +{% enddetails %} +  + +[Paper](https://arxiv.org/abs/2310.05513) + +Speech enjoys a variety of self-supervised models, all of which use different types of architectures or pre-training tasks. *But how do you know which models are the best for a given task?* Traditionally, the [SUPERB Benchmark](https://superbbenchmark.org/leaderboard) has been the go-to resource for answering this question. It tests the ability of these models across various speech processing tasks, ranging from speaker identification to speech recognition. However, all of the tasks in SUPERB are in English. So while it can answer the aforementioned question well, another one remains open: **What are the best models for a given language?** We sought to answer this question when we developed the Multilingual SUPERB (ML-SUPERB) Benchmark. + +ML-SUPERB benchmarks self-supervised models on speech recognition for 143 languages. This evaluation is split across 2 data tracks: a 10-minute track and a 1-hour track, which correspond to the amount of labeled data used to finetune the model *per language*. Within each track are several training settings. The monolingual setting tests the model on monolingual ASR for 13 languages separately. The multilingual setting evaluates the model on language identification (LID), multilingual ASR, and joint LID+ASR on all 143 languages. + +While ML-SUPERB had the highest language coverage yet of any speech benchmark, it is far from the ~8000 languages spoken around the world. Further growing this coverage, however, is no simple task. Paired speech/text data is expensive to obtain, particularly for languages with smaller populations. Data quality is also a concern, as the findings that can be gleaned from the benchmark rely upon the reliability of its data sources. **Given these challenges, how can we extend speech technologies to new languages?** We believed that the solution lay in community-driven efforts, integrating the work of researchers across the globe. The ML-SUPERB Challenge was thus born, inviting researchers to contribute corpora for new languages and design new methods for multilingual speech processing. + +{% include figure.html path="assets/img/blog/mlsuperb_map.png" class="img-fluid rounded z-depth-0" zoomable=true %} +
+Geographical distribution of the languages submitted to the ML-SUPERB Challenge. +
+
**In total, we received submissions covering 54 languages, increasing the number of unique languages in the benchmark to 154.** A few of the new languages added include Quechua and Taiwanese Hokkien. While some submitted languages overlapped with those originally in the benchmark, they extended the corpora to new conversational, dialectal, and recording scenarios. We used these submissions to construct a hidden set for ML-SUPERB, which was used to further evaluate new and existing self-supervised models. Importantly, the new hidden set mostly consists of *conversational* speech, whereas the existing public set was mostly *read speech*. We found that model performance could vary significantly between the two regimes, showing that further work is necessary to build truly universal speech representations. + + +## What's Next? + +- We are in the process of releasing the YODAS dataset, which is its own challenge due to the size (100+TB!) +- Our next generation of models will combine all of these works together! We plan to integrate SSL pre-training into OWSM, and then train it on YODAS + more data. +- ML-SUPERB will be extended to even more languages and tasks. + diff --git a/_posts/2023-12-31-reading-group.md b/_posts/2023-12-31-reading-group.md index f33e3cd5..4e364a86 100644 --- a/_posts/2023-12-31-reading-group.md +++ b/_posts/2023-12-31-reading-group.md @@ -38,3 +38,15 @@ comments: false - [Efficient Transformers with Dynamic Token Pooling](https://arxiv.org/abs/2211.09761) - [A Simple Concatenation can Effectively Improve Speech Translation](https://aclanthology.org/2023.acl-short.153/) - [CTC-based Non-autoregressive Speech Translation](https://aclanthology.org/2023.acl-long.744/) + +### 2023.10.10 ACL 2023 Paper List +- [When Does Translation Require Context?
A Data-driven, Multilingual Exploration](https://aclanthology.org/2023.acl-long.36/) +- [Introducing Semantics into Speech Encoders](https://aclanthology.org/2023.acl-long.639/) +- [Pre-Training to Learn in Context](https://aclanthology.org/2023.acl-long.267.pdf) +- [Learning Language-Specific Layers for Multilingual Machine Translation](https://aclanthology.org/2023.acl-long.825/) +- [Finding the Pillars of Strength for Multi-Head Attention](https://aclanthology.org/2023.acl-long.812/) + +### 2023.11.7 WASPAA 2023 Paper List +- [Differentiable Representation of Warping based on Lie Group Theory](https://ieeexplore.ieee.org/document/10248099) +- [A Differentiable Image Source Model for Room Acoustics Optimization](https://ieeexplore.ieee.org/document/10248140) +- [Yet Another Generative Model For Room Impulse Response Estimation](https://ieeexplore.ieee.org/document/10248189) diff --git a/_posts/image.png b/_posts/image.png new file mode 100644 index 00000000..5ad083e6 Binary files /dev/null and b/_posts/image.png differ diff --git a/_site/assets/bibliography/2018-12-22-distill.bib b/_site/assets/bibliography/2018-12-22-distill.bib deleted file mode 100644 index 2b06f3c9..00000000 --- a/_site/assets/bibliography/2018-12-22-distill.bib +++ /dev/null @@ -1,7 +0,0 @@ -@article{gregor2015draw, - title={DRAW: A recurrent neural network for image generation}, - author={Gregor, Karol and Danihelka, Ivo and Graves, Alex and Rezende, Danilo Jimenez and Wierstra, Daan}, - journal={arXiv preprint, arXiv:1502.04623}, - year={2015}, - url={https://arxiv.org/pdf/1502.04623.pdf} -} diff --git a/_site/assets/css/main.css.map b/_site/assets/css/main.css.map index e236371d..3757cb3c 100644 --- a/_site/assets/css/main.css.map +++ b/_site/assets/css/main.css.map @@ -11,11 +11,11 @@ ], "sourcesContent": [ "@charset \"utf-8\";\n\n// Dimensions\n$max-content-width: 1000px;\n\n@import\n \"variables\",\n \"themes\",\n \"layout\",\n \"base\",\n \"distill\"\n;\n\n", - "/*******************************************************************************\n * Variables used throughout the theme.\n * To adjust anything, simply edit the variables below and rebuild the theme.\n ******************************************************************************/\n\n\n// Colors\n$red-color: #FF3636 !default;\n$red-color-dark: #B71C1C !default;\n$orange-color: #F29105 !default;\n$blue-color: #0076df !default;\n$blue-color-dark: #00369f !default;\n$cyan-color: #2698BA !default;\n$light-cyan-color: lighten($cyan-color, 25%);\n$green-color: #00ab37 !default;\n$green-color-lime: #B7D12A !default;\n$green-color-dark: #009f06 !default;\n$green-color-light: #ddffdd !default;\n$green-color-bright: #11D68B !default;\n$purple-color: #B509AC !default;\n$light-purple-color: lighten($purple-color, 25%);\n$pink-color: #f92080 !default;\n$pink-color-light: #ffdddd !default;\n$yellow-color: #efcc00 !default;\n\n$grey-color: #828282 !default;\n$grey-color-light: lighten($grey-color, 40%);\n$grey-color-dark: darken($grey-color, 25%);\n\n$white-color: #ffffff !default;\n$black-color: #000000 !default;\n\n\n// Theme colors\n\n$code-bg-color-light: rgba($purple-color, 0.05);\n$code-bg-color-dark: #2c3237 !default;\n\n", + "/*******************************************************************************\r\n * Variables used throughout the theme.\r\n * To adjust anything, simply edit the variables below and rebuild the theme.\r\n ******************************************************************************/\r\n\r\n\r\n// Colors\r\n$red-color: #FF3636 
!default;\r\n$red-color-dark: #B71C1C !default;\r\n$orange-color: #F29105 !default;\r\n$blue-color: #0076df !default;\r\n$blue-color-dark: #00369f !default;\r\n$cyan-color: #2698BA !default;\r\n$light-cyan-color: lighten($cyan-color, 25%);\r\n$green-color: #00ab37 !default;\r\n$green-color-lime: #B7D12A !default;\r\n$green-color-dark: #009f06 !default;\r\n$green-color-light: #ddffdd !default;\r\n$green-color-bright: #11D68B !default;\r\n$purple-color: #B509AC !default;\r\n$light-purple-color: lighten($purple-color, 25%);\r\n$pink-color: #f92080 !default;\r\n$pink-color-light: #ffdddd !default;\r\n$yellow-color: #efcc00 !default;\r\n\r\n$grey-color: #828282 !default;\r\n$grey-color-light: lighten($grey-color, 40%);\r\n$grey-color-dark: darken($grey-color, 25%);\r\n\r\n$white-color: #ffffff !default;\r\n$black-color: #000000 !default;\r\n\r\n\r\n// Theme colors\r\n\r\n$code-bg-color-light: rgba($purple-color, 0.05);\r\n$code-bg-color-dark: #2c3237 !default;\r\n\r\n", "/*******************************************************************************\r\n * Themes\r\n ******************************************************************************/\r\n \r\n:root {\r\n --global-bg-color: #{$white-color};\r\n --global-code-bg-color: #{$code-bg-color-light};\r\n --global-text-color: #{$black-color};\r\n --global-text-color-light: #{$grey-color};\r\n --global-theme-color: #{$purple-color};\r\n --global-hover-color: #{$purple-color};\r\n --global-footer-bg-color: #{$grey-color-dark};\r\n --global-footer-text-color: #{$grey-color-light};\r\n --global-footer-link-color: #{$white-color};\r\n --global-distill-app-color: #{$grey-color};\r\n\r\n .fa-sun {\r\n display : none;\r\n }\r\n .fa-moon {\r\n padding-left: 10px;\r\n padding-top: 12px;\r\n display : block;\r\n }\r\n}\r\n\r\nhtml[data-theme='dark'] {\r\n --global-bg-color: #{$grey-color-dark};\r\n --global-code-bg-color: #{$code-bg-color-dark};\r\n --global-text-color: #{$grey-color-light};\r\n --global-text-color-light: #{$grey-color-light};\r\n --global-theme-color: #{$cyan-color};\r\n --global-hover-color: #{$cyan-color};\r\n --global-footer-bg-color: #{$grey-color-light};\r\n --global-footer-text-color: #{$grey-color-dark};\r\n --global-footer-link-color: #{$black-color};\r\n --global-distill-app-color: #{$grey-color-light};\r\n\r\n .fa-sun {\r\n padding-left: 10px;\r\n padding-top: 12px;\r\n display : block;\r\n }\r\n .fa-moon {\r\n display : none;\r\n }\r\n}\r\n", - "/******************************************************************************\n * Content\n ******************************************************************************/\n\n body {\n padding-bottom: 70px;\n color: var(--global-text-color);\n background-color: var(--global-bg-color);\n }\n\n body.fixed-top-nav {\n // Add some padding for the nav-bar.\n padding-top: 56px;\n }\n\n body.sticky-bottom-footer {\n // Remove padding below footer.\n padding-bottom: 0;\n }\n\n.container {\n max-width: $max-content-width;\n}\n\n// Profile\n.profile {\n img {\n width: 100%;\n }\n}\n\n// TODO: redefine content layout.\n\n\n/******************************************************************************\n * Publications\n ******************************************************************************/\n\n// TODO: redefine publications layout.\n\n\n/*****************************************************************************\n* Projects\n*****************************************************************************/\n\n// TODO: redefine projects layout.\n", - 
"/*******************************************************************************\n * Styles for the base elements of the theme.\n ******************************************************************************/\n\n// Typography\n\np, h1, h2, h3, h4, h5, h6, em, div, span, strong {\n color: var(--global-text-color);\n}\n\na, table.table a {\n color: var(--global-theme-color);\n &:hover {\n color: var(--global-theme-color);\n text-decoration: underline;\n }\n &:hover:after {\n width: 100%;\n }\n}\n\n// Math\n\n.equation {\n margin-bottom: 1rem;\n text-align: center;\n}\n\n// Caption\n\n.caption {\n font-size: 0.875rem;\n margin-top: 0.75rem;\n margin-bottom: 1.5rem;\n text-align: center;\n}\n\n// Citation\n.citation, .citation-number {\n color: var(--global-theme-color);\n}\n\n// Profile\n\n.profile {\n margin-left: 1rem;\n width: 100%;\n\n .address {\n margin-bottom: 5px;\n margin-top: 5px;\n font-family: monospace;\n font-size: 1.2rem;\n p {\n display: inline-block;\n margin: 0;\n }\n }\n}\n\n@media (min-width: 576px) {\n .profile {\n width: 30%;\n .address {\n p { display: block; }\n }\n }\n}\n\n.post-description {\n margin-bottom: 2rem;\n font-size: 0.875rem;\n a {\n color: inherit;\n &:hover {\n color: var(--global-theme-color);\n text-decoration: none;\n }\n }\n}\n\n\n// Navbar customization\n\n.navbar {\n box-shadow: none;\n border-bottom: 1px solid $grey-color-light;\n background-color: var(--global-bg-color);\n opacity: 0.95;\n}\n.navbar.navbar-light {\n a {\n &:hover {\n text-decoration: none;\n }\n }\n .navbar-brand {\n color: var(--global-text-color);\n }\n .navbar-nav .nav-item .nav-link {\n color: var(--global-text-color);\n &:hover {\n color: var(--global-hover-color);\n }\n }\n .navbar-nav .nav-item.active>.nav-link {\n background-color: inherit;\n font-weight: bolder;\n color: var(--global-theme-color);\n &:hover {\n color: var(--global-hover-color);\n }\n }\n .navbar-brand.social {\n padding-bottom: 0;\n padding-top: 0;\n font-size: 1.7rem;\n a {\n i::before {\n color: var(--global-text-color);\n -webkit-transition: all 0.2s ease-in-out;\n }\n &:hover {\n i::before {\n color: var(--global-theme-color);\n }\n }\n }\n }\n}\n\n.navbar-toggler {\n .icon-bar {\n display: block;\n width: 22px;\n height: 2px;\n background-color: var(--global-text-color);\n border-radius: 1px;\n margin-bottom: 4px;\n transition: all 0.2s;\n }\n .top-bar {\n transform: rotate(45deg);\n transform-origin: 10% 10%;\n }\n .middle-bar {\n opacity: 0;\n }\n .bottom-bar {\n transform: rotate(-45deg);\n transform-origin: 10% 90%;\n }\n}\n\n.navbar-toggler.collapsed {\n .top-bar {\n transform: rotate(0);\n }\n .middle-bar {\n opacity: 1;\n }\n .bottom-bar {\n transform: rotate(0);\n }\n}\n\n\n// News\n\n.news table td {\n font-size: 1rem;\n color: var(--global-text-color);\n}\n\n.news table th {\n color: var(--global-text-color);\n}\n\n// Social (bottom)\n\n.social {\n text-align: center;\n .contact-icons {\n font-size: 4rem;\n a {\n i::before {\n color: var(--global-text-color);\n -webkit-transition: all 0.2s ease-in-out;\n }\n &:hover {\n i::before {\n color: var(--global-theme-color);\n }\n }\n }\n }\n .contact-note {\n font-size: 0.8rem;\n }\n}\n\n\n// Footer\nfooter.fixed-bottom {\n background-color: var(--global-footer-bg-color);\n font-size: 0.75rem;\n .container {\n color: var(--global-footer-text-color);\n padding-top: 9px;\n padding-bottom: 8px;\n }\n a {\n color: var(--global-footer-link-color);\n &:hover {\n color: var(--global-theme-color);\n text-decoration: none;\n }\n 
}\n}\n\nfooter.sticky-bottom {\n border-top: 1px solid $grey-color-light;\n padding-top: 40px;\n padding-bottom: 40px;\n font-size: 0.9rem;\n}\n\n\n// Blog\n\n.header-bar {\n border-bottom: 1px solid $grey-color-light;\n text-align: center;\n padding-top: 2rem;\n padding-bottom: 5rem;\n h1 {\n color: var(--global-theme-color);\n font-size: 5rem;\n }\n}\n\n.post-list {\n margin: 0;\n margin-bottom: 40px;\n padding: 0;\n li {\n border-bottom: 1px solid $grey-color-light;\n list-style: none;\n padding-top: 2rem;\n padding-bottom: 2rem;\n .post-meta {\n color: var(--global-text-color-light);\n font-size: 0.875rem;\n margin-bottom: 0;\n }\n a {\n color: var(--global-text-color);\n text-decoration: none;\n &:hover {\n color: var(--global-theme-color);\n }\n }\n }\n}\n\n.pagination {\n .page-item {\n .page-link {\n color: var(--global-text-color);\n &:hover {\n color: $black-color;\n }\n }\n &.active .page-link {\n color: $white-color;\n background-color: var(--global-theme-color);\n &:hover {\n background-color: var(--global-theme-color);\n }\n }\n }\n}\n\n\n// Distill\n\n.distill {\n a:hover {\n border-bottom-color: var(--global-theme-color);\n text-decoration: none;\n }\n}\n\n\n// Projects\n\n.projects {\n .card-item {\n width: auto;\n margin-bottom: 10px;\n\n a {\n text-decoration: none;\n }\n\n .row {\n display: flex;\n align-items: center;\n }\n\n .card {\n img {\n width: 100%;\n }\n }\n }\n\n .grid-item {\n width: 250px;\n margin-bottom: 10px;\n\n a {\n color: black;\n text-decoration: none;\n\n &:hover {\n color: var(--global-theme-color);\n }\n }\n\n .card {\n img {\n width: 100%;\n }\n .card-title {\n color: $black-color;\n }\n }\n }\n\n h2.category {\n color: $grey-color-light;\n border-bottom: 1px solid $grey-color-light;\n padding-top: 0.5rem;\n margin-top: 2rem;\n margin-bottom: 1rem;\n text-align: right;\n }\n}\n\n\n// Publications\n\n.publications {\n margin-top: 2rem;\n h1 {\n color: var(--global-theme-color);\n font-size: 2rem;\n text-align: center;\n margin-top: 1em;\n margin-bottom: 1em;\n }\n h2 {\n margin-bottom: 1rem;\n span {\n font-size: 1.5rem;\n }\n }\n h2.year {\n color: $grey-color-light;\n border-top: 1px solid $grey-color-light;\n padding-top: 1rem;\n margin-top: 2rem;\n margin-bottom: -2rem;\n text-align: right;\n }\n ol.bibliography {\n list-style: none;\n padding: 0;\n margin-top: 0;\n\n li {\n margin-bottom: 1rem;\n .abbr {\n height: 2rem;\n margin-bottom: 0.5rem;\n abbr {\n display: inline-block;\n background-color: var(--global-theme-color);\n padding-left: 1rem;\n padding-right: 1rem;\n a {\n color: white;\n &:hover {\n text-decoration: none;\n }\n }\n }\n .award {\n color: var(--global-theme-color) !important;\n border: 1px solid var(--global-theme-color);\n }\n }\n .title {\n font-weight: bolder;\n }\n .author {\n a {\n border-bottom: 1px dashed var(--global-theme-color);\n &:hover {\n border-bottom-style: solid;\n text-decoration: none;\n }\n }\n > em {\n border-bottom: 1px solid;\n font-style: normal;\n }\n }\n .links {\n a.btn {\n color: var(--global-text-color);\n border: 1px solid var(--global-text-color);\n padding-left: 1rem;\n padding-right: 1rem;\n padding-top: 0.25rem;\n padding-bottom: 0.25rem;\n &:hover {\n color: var(--global-theme-color);\n border-color: var(--global-theme-color);\n }\n }\n }\n .hidden {\n font-size: 0.875rem;\n max-height: 0px;\n overflow: hidden;\n text-align: justify;\n -webkit-transition: 0.15s ease;\n -moz-transition: 0.15s ease;\n -ms-transition: 0.15s ease;\n -o-transition: 0.15s ease;\n transition: all 0.15s ease;\n\n 
p {\n line-height: 1.4em;\n margin: 10px;\n }\n pre {\n font-size: 1em;\n line-height: 1.4em;\n padding: 10px;\n }\n }\n .hidden.open {\n max-height: 100em;\n -webkit-transition: 0.15s ease;\n -moz-transition: 0.15s ease;\n -ms-transition: 0.15s ease;\n -o-transition: 0.15s ease;\n transition: all 0.15s ease;\n }\n div.abstract.hidden {\n border: dashed 1px var(--global-bg-color);\n }\n div.abstract.hidden.open {\n border-color: var(--global-text-color);\n }\n }\n }\n}\n\n// Rouge Color Customization\nfigure.highlight {\n margin: 0 0 1rem;\n}\n\npre {\n color: var(--global-theme-color);\n background-color: var(--global-code-bg-color);\n border-radius: 6px;\n padding: 6px 12px;\n pre, code {\n background-color: transparent;\n border-radius: 0;\n padding: 0;\n }\n}\n\ncode {\n color: var(--global-theme-color);\n background-color: var(--global-code-bg-color);\n border-radius: 3px;\n padding: 3px 3px;\n}\n\n\n// Transitioning Themes\nhtml.transition,\nhtml.transition *,\nhtml.transition *:before,\nhtml.transition *:after {\n transition: all 750ms !important;\n transition-delay: 0 !important;\n}\n", - "/*******************************************************************************\n * Style overrides for distill blog posts.\n ******************************************************************************/\n\nd-byline {\n border-top-color: $grey-color-light !important;\n}\n\nd-byline h3 {\n color: var(--global-text-color) !important;\n}\n\nd-byline a, d-article d-byline a {\n color: var(--global-text-color) !important;\n &:hover {\n color: var(--global-hover-color) !important;\n }\n}\n\nd-article {\n border-top-color: #e8e8e8 !important;\n a, p, h1, h2, h3, h4, h5, h6 {\n color: var(--global-text-color) !important;\n }\n a, h1, h2, hr {\n border-bottom-color: $grey-color-light !important;\n }\n a:hover {\n border-bottom-color: var(--global-hover-color) !important;\n }\n}\n\nd-appendix {\n border-top-color: $grey-color-light !important;\n color: var(--global-distill-app-color) !important;\n h3, li, span {\n color: var(--global-distill-app-color) !important;\n }\n a, a.footnote-backlink {\n color: var(--global-distill-app-color) !important;\n &:hover {\n color: var(--global-hover-color) !important;\n }\n }\n}\n" + "/******************************************************************************\r\n * Content\r\n ******************************************************************************/\r\n\r\n body {\r\n padding-bottom: 70px;\r\n color: var(--global-text-color);\r\n background-color: var(--global-bg-color);\r\n }\r\n\r\n body.fixed-top-nav {\r\n // Add some padding for the nav-bar.\r\n padding-top: 56px;\r\n }\r\n\r\n body.sticky-bottom-footer {\r\n // Remove padding below footer.\r\n padding-bottom: 0;\r\n }\r\n\r\n.container {\r\n max-width: $max-content-width;\r\n}\r\n\r\n// Profile\r\n.profile {\r\n img {\r\n width: 100%;\r\n }\r\n}\r\n\r\n// TODO: redefine content layout.\r\n\r\n\r\n/******************************************************************************\r\n * Publications\r\n ******************************************************************************/\r\n\r\n// TODO: redefine publications layout.\r\n\r\n\r\n/*****************************************************************************\r\n* Projects\r\n*****************************************************************************/\r\n\r\n// TODO: redefine projects layout.\r\n", + "/*******************************************************************************\r\n * Styles for the base elements of the theme.\r\n 
******************************************************************************/\r\n\r\n// Typography\r\n\r\np, h1, h2, h3, h4, h5, h6, em, div, span, strong {\r\n color: var(--global-text-color);\r\n}\r\n\r\na, table.table a {\r\n color: var(--global-theme-color);\r\n &:hover {\r\n color: var(--global-theme-color);\r\n text-decoration: underline;\r\n }\r\n &:hover:after {\r\n width: 100%;\r\n }\r\n}\r\n\r\n// Math\r\n\r\n.equation {\r\n margin-bottom: 1rem;\r\n text-align: center;\r\n}\r\n\r\n// Caption\r\n\r\n.caption {\r\n font-size: 0.875rem;\r\n margin-top: 0.75rem;\r\n margin-bottom: 1.5rem;\r\n text-align: center;\r\n}\r\n\r\n// Citation\r\n.citation, .citation-number {\r\n color: var(--global-theme-color);\r\n}\r\n\r\n// Profile\r\n\r\n.profile {\r\n margin-left: 1rem;\r\n width: 100%;\r\n\r\n .address {\r\n margin-bottom: 5px;\r\n margin-top: 5px;\r\n font-family: monospace;\r\n font-size: 1.2rem;\r\n p {\r\n display: inline-block;\r\n margin: 0;\r\n }\r\n }\r\n}\r\n\r\n@media (min-width: 576px) {\r\n .profile {\r\n width: 30%;\r\n .address {\r\n p { display: block; }\r\n }\r\n }\r\n}\r\n\r\n.post-description {\r\n margin-bottom: 2rem;\r\n font-size: 0.875rem;\r\n a {\r\n color: inherit;\r\n &:hover {\r\n color: var(--global-theme-color);\r\n text-decoration: none;\r\n }\r\n }\r\n}\r\n\r\n\r\n// Navbar customization\r\n\r\n.navbar {\r\n box-shadow: none;\r\n border-bottom: 1px solid $grey-color-light;\r\n background-color: var(--global-bg-color);\r\n opacity: 0.95;\r\n}\r\n.navbar.navbar-light {\r\n a {\r\n &:hover {\r\n text-decoration: none;\r\n }\r\n }\r\n .navbar-brand {\r\n color: var(--global-text-color);\r\n }\r\n .navbar-nav .nav-item .nav-link {\r\n color: var(--global-text-color);\r\n &:hover {\r\n color: var(--global-hover-color);\r\n }\r\n }\r\n .navbar-nav .nav-item.active>.nav-link {\r\n background-color: inherit;\r\n font-weight: bolder;\r\n color: var(--global-theme-color);\r\n &:hover {\r\n color: var(--global-hover-color);\r\n }\r\n }\r\n .navbar-brand.social {\r\n padding-bottom: 0;\r\n padding-top: 0;\r\n font-size: 1.7rem;\r\n a {\r\n i::before {\r\n color: var(--global-text-color);\r\n -webkit-transition: all 0.2s ease-in-out;\r\n }\r\n &:hover {\r\n i::before {\r\n color: var(--global-theme-color);\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\n.navbar-toggler {\r\n .icon-bar {\r\n display: block;\r\n width: 22px;\r\n height: 2px;\r\n background-color: var(--global-text-color);\r\n border-radius: 1px;\r\n margin-bottom: 4px;\r\n transition: all 0.2s;\r\n }\r\n .top-bar {\r\n transform: rotate(45deg);\r\n transform-origin: 10% 10%;\r\n }\r\n .middle-bar {\r\n opacity: 0;\r\n }\r\n .bottom-bar {\r\n transform: rotate(-45deg);\r\n transform-origin: 10% 90%;\r\n }\r\n}\r\n\r\n.navbar-toggler.collapsed {\r\n .top-bar {\r\n transform: rotate(0);\r\n }\r\n .middle-bar {\r\n opacity: 1;\r\n }\r\n .bottom-bar {\r\n transform: rotate(0);\r\n }\r\n}\r\n\r\n\r\n// News\r\n\r\n.news table td {\r\n font-size: 1rem;\r\n color: var(--global-text-color);\r\n}\r\n\r\n.news table th {\r\n color: var(--global-text-color);\r\n}\r\n\r\n// Social (bottom)\r\n\r\n.social {\r\n text-align: center;\r\n .contact-icons {\r\n font-size: 4rem;\r\n a {\r\n i::before {\r\n color: var(--global-text-color);\r\n -webkit-transition: all 0.2s ease-in-out;\r\n }\r\n &:hover {\r\n i::before {\r\n color: var(--global-theme-color);\r\n }\r\n }\r\n }\r\n }\r\n .contact-note {\r\n font-size: 0.8rem;\r\n }\r\n}\r\n\r\n\r\n// Footer\r\nfooter.fixed-bottom {\r\n background-color: var(--global-footer-bg-color);\r\n 
font-size: 0.75rem;\r\n .container {\r\n color: var(--global-footer-text-color);\r\n padding-top: 9px;\r\n padding-bottom: 8px;\r\n }\r\n a {\r\n color: var(--global-footer-link-color);\r\n &:hover {\r\n color: var(--global-theme-color);\r\n text-decoration: none;\r\n }\r\n }\r\n}\r\n\r\nfooter.sticky-bottom {\r\n border-top: 1px solid $grey-color-light;\r\n padding-top: 40px;\r\n padding-bottom: 40px;\r\n font-size: 0.9rem;\r\n}\r\n\r\n\r\n// Blog\r\n\r\n.header-bar {\r\n border-bottom: 1px solid $grey-color-light;\r\n text-align: center;\r\n padding-top: 2rem;\r\n padding-bottom: 5rem;\r\n h1 {\r\n color: var(--global-theme-color);\r\n font-size: 5rem;\r\n }\r\n}\r\n\r\n.post-list {\r\n margin: 0;\r\n margin-bottom: 40px;\r\n padding: 0;\r\n li {\r\n border-bottom: 1px solid $grey-color-light;\r\n list-style: none;\r\n padding-top: 2rem;\r\n padding-bottom: 2rem;\r\n .post-meta {\r\n color: var(--global-text-color-light);\r\n font-size: 0.875rem;\r\n margin-bottom: 0;\r\n }\r\n a {\r\n color: var(--global-text-color);\r\n text-decoration: none;\r\n &:hover {\r\n color: var(--global-theme-color);\r\n }\r\n }\r\n }\r\n}\r\n\r\n.pagination {\r\n .page-item {\r\n .page-link {\r\n color: var(--global-text-color);\r\n &:hover {\r\n color: $black-color;\r\n }\r\n }\r\n &.active .page-link {\r\n color: $white-color;\r\n background-color: var(--global-theme-color);\r\n &:hover {\r\n background-color: var(--global-theme-color);\r\n }\r\n }\r\n }\r\n}\r\n\r\n\r\n// Distill\r\n\r\n.distill {\r\n a:hover {\r\n border-bottom-color: var(--global-theme-color);\r\n text-decoration: none;\r\n }\r\n}\r\n\r\n\r\n// Projects\r\n\r\n.projects {\r\n .card-item {\r\n width: auto;\r\n margin-bottom: 10px;\r\n\r\n a {\r\n text-decoration: none;\r\n }\r\n\r\n .row {\r\n display: flex;\r\n align-items: center;\r\n }\r\n\r\n .card {\r\n img {\r\n width: 100%;\r\n }\r\n }\r\n }\r\n\r\n .grid-item {\r\n width: 250px;\r\n margin-bottom: 10px;\r\n\r\n a {\r\n color: black;\r\n text-decoration: none;\r\n\r\n &:hover {\r\n color: var(--global-theme-color);\r\n }\r\n }\r\n\r\n .card {\r\n img {\r\n width: 100%;\r\n }\r\n .card-title {\r\n color: $black-color;\r\n }\r\n }\r\n }\r\n\r\n h2.category {\r\n color: $grey-color-light;\r\n border-bottom: 1px solid $grey-color-light;\r\n padding-top: 0.5rem;\r\n margin-top: 2rem;\r\n margin-bottom: 1rem;\r\n text-align: right;\r\n }\r\n}\r\n\r\n\r\n// Publications\r\n\r\n.publications {\r\n margin-top: 2rem;\r\n h1 {\r\n color: var(--global-theme-color);\r\n font-size: 2rem;\r\n text-align: center;\r\n margin-top: 1em;\r\n margin-bottom: 1em;\r\n }\r\n h2 {\r\n margin-bottom: 1rem;\r\n span {\r\n font-size: 1.5rem;\r\n }\r\n }\r\n h2.year {\r\n color: $grey-color-light;\r\n border-top: 1px solid $grey-color-light;\r\n padding-top: 1rem;\r\n margin-top: 2rem;\r\n margin-bottom: -2rem;\r\n text-align: right;\r\n }\r\n ol.bibliography {\r\n list-style: none;\r\n padding: 0;\r\n margin-top: 0;\r\n\r\n li {\r\n margin-bottom: 1rem;\r\n .abbr {\r\n height: 2rem;\r\n margin-bottom: 0.5rem;\r\n abbr {\r\n display: inline-block;\r\n background-color: var(--global-theme-color);\r\n padding-left: 1rem;\r\n padding-right: 1rem;\r\n a {\r\n color: white;\r\n &:hover {\r\n text-decoration: none;\r\n }\r\n }\r\n }\r\n .award {\r\n color: var(--global-theme-color) !important;\r\n border: 1px solid var(--global-theme-color);\r\n }\r\n }\r\n .title {\r\n font-weight: bolder;\r\n }\r\n .author {\r\n a {\r\n border-bottom: 1px dashed var(--global-theme-color);\r\n &:hover {\r\n border-bottom-style: 
solid;\r\n text-decoration: none;\r\n }\r\n }\r\n > em {\r\n border-bottom: 1px solid;\r\n font-style: normal;\r\n }\r\n }\r\n .links {\r\n a.btn {\r\n color: var(--global-text-color);\r\n border: 1px solid var(--global-text-color);\r\n padding-left: 1rem;\r\n padding-right: 1rem;\r\n padding-top: 0.25rem;\r\n padding-bottom: 0.25rem;\r\n &:hover {\r\n color: var(--global-theme-color);\r\n border-color: var(--global-theme-color);\r\n }\r\n }\r\n }\r\n .hidden {\r\n font-size: 0.875rem;\r\n max-height: 0px;\r\n overflow: hidden;\r\n text-align: justify;\r\n -webkit-transition: 0.15s ease;\r\n -moz-transition: 0.15s ease;\r\n -ms-transition: 0.15s ease;\r\n -o-transition: 0.15s ease;\r\n transition: all 0.15s ease;\r\n\r\n p {\r\n line-height: 1.4em;\r\n margin: 10px;\r\n }\r\n pre {\r\n font-size: 1em;\r\n line-height: 1.4em;\r\n padding: 10px;\r\n }\r\n }\r\n .hidden.open {\r\n max-height: 100em;\r\n -webkit-transition: 0.15s ease;\r\n -moz-transition: 0.15s ease;\r\n -ms-transition: 0.15s ease;\r\n -o-transition: 0.15s ease;\r\n transition: all 0.15s ease;\r\n }\r\n div.abstract.hidden {\r\n border: dashed 1px var(--global-bg-color);\r\n }\r\n div.abstract.hidden.open {\r\n border-color: var(--global-text-color);\r\n }\r\n }\r\n }\r\n}\r\n\r\n// Rouge Color Customization\r\nfigure.highlight {\r\n margin: 0 0 1rem;\r\n}\r\n\r\npre {\r\n color: var(--global-theme-color);\r\n background-color: var(--global-code-bg-color);\r\n border-radius: 6px;\r\n padding: 6px 12px;\r\n pre, code {\r\n background-color: transparent;\r\n border-radius: 0;\r\n padding: 0;\r\n }\r\n}\r\n\r\ncode {\r\n color: var(--global-theme-color);\r\n background-color: var(--global-code-bg-color);\r\n border-radius: 3px;\r\n padding: 3px 3px;\r\n}\r\n\r\n\r\n// Transitioning Themes\r\nhtml.transition,\r\nhtml.transition *,\r\nhtml.transition *:before,\r\nhtml.transition *:after {\r\n transition: all 750ms !important;\r\n transition-delay: 0 !important;\r\n}\r\n", + "/*******************************************************************************\r\n * Style overrides for distill blog posts.\r\n ******************************************************************************/\r\n\r\nd-byline {\r\n border-top-color: $grey-color-light !important;\r\n}\r\n\r\nd-byline h3 {\r\n color: var(--global-text-color) !important;\r\n}\r\n\r\nd-byline a, d-article d-byline a {\r\n color: var(--global-text-color) !important;\r\n &:hover {\r\n color: var(--global-hover-color) !important;\r\n }\r\n}\r\n\r\nd-article {\r\n border-top-color: #e8e8e8 !important;\r\n a, p, h1, h2, h3, h4, h5, h6 {\r\n color: var(--global-text-color) !important;\r\n }\r\n a, h1, h2, hr {\r\n border-bottom-color: $grey-color-light !important;\r\n }\r\n a:hover {\r\n border-bottom-color: var(--global-hover-color) !important;\r\n }\r\n}\r\n\r\nd-appendix {\r\n border-top-color: $grey-color-light !important;\r\n color: var(--global-distill-app-color) !important;\r\n h3, li, span {\r\n color: var(--global-distill-app-color) !important;\r\n }\r\n a, a.footnote-backlink {\r\n color: var(--global-distill-app-color) !important;\r\n &:hover {\r\n color: var(--global-hover-color) !important;\r\n }\r\n }\r\n}\r\n" ], "names": [], "mappings": 
"ACAA,mMAGgF;ACHhF,0FAEgF;CAE/E,AAAD,IAAK,CAAC,EACJ,iBAAiB,CAAA,QAAC,EAClB,sBAAsB,CAAA,wBAAC,EACvB,mBAAmB,CAAA,QAAC,EACpB,yBAAyB,CAAA,QAAC,EAC1B,oBAAoB,CAAA,QAAC,EACrB,oBAAoB,CAAA,QAAC,EACrB,wBAAwB,CAAA,QAAC,EACzB,0BAA0B,CAAA,QAAC,EAC3B,0BAA0B,CAAA,QAAC,EAC3B,0BAA0B,CAAA,QAAC,GAU5B;;CApBA,AAYC,IAZG,CAYH,OAAO,CAAC,EACN,OAAO,EAAG,IAAI,GACf;;CAdF,AAeC,IAfG,CAeH,QAAQ,CAAC,EACP,YAAY,EAAE,IAAI,EAClB,WAAW,EAAE,IAAI,EACjB,OAAO,EAAG,KAAK,GAChB;;AAGH,AAAA,IAAI,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAAmB,EACtB,iBAAiB,CAAA,QAAC,EAClB,sBAAsB,CAAA,QAAC,EACvB,mBAAmB,CAAA,QAAC,EACpB,yBAAyB,CAAA,QAAC,EAC1B,oBAAoB,CAAA,QAAC,EACrB,oBAAoB,CAAA,QAAC,EACrB,wBAAwB,CAAA,QAAC,EACzB,0BAA0B,CAAA,QAAC,EAC3B,0BAA0B,CAAA,QAAC,EAC3B,0BAA0B,CAAA,QAAC,GAU5B;;AApBD,AAYE,IAZE,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAYH,OAAO,CAAC,EACN,YAAY,EAAE,IAAI,EAClB,WAAW,EAAE,IAAI,EACjB,OAAO,EAAG,KAAK,GAChB;;AAhBH,AAiBE,IAjBE,CAAA,AAAA,UAAC,CAAW,MAAM,AAAjB,EAiBH,QAAQ,CAAC,EACP,OAAO,EAAG,IAAI,GACf;;AC7CH,0FAEgF;AAE/E,AAAA,IAAI,CAAC,EACH,cAAc,EAAE,IAAI,EACpB,KAAK,EAAE,wBAAwB,EAC/B,gBAAgB,EAAE,sBAAsB,GACzC;;AAED,AAAA,IAAI,AAAA,cAAc,CAAC,EAEjB,WAAW,EAAE,IAAI,GAClB;;AAED,AAAA,IAAI,AAAA,qBAAqB,CAAC,EAExB,cAAc,EAAE,CAAC,GAClB;;AAEF,AAAA,UAAU,CAAC,EACT,SAAS,EHlBS,MAAM,GGmBzB;;AAGD,AACE,QADM,CACN,GAAG,CAAC,EACF,KAAK,EAAE,IAAI,GACZ;;AAMH,+FAEgF;AAKhF,0FAE8E;AC3C9E,8HAEgF;AAIhF,AAAA,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,GAAG,EAAE,IAAI,EAAE,MAAM,CAAC,EAC/C,KAAK,EAAE,wBAAwB,GAChC;;AAED,AAAA,CAAC,EAAE,KAAK,AAAA,MAAM,CAAC,CAAC,CAAC,EACf,KAAK,EAAE,yBAAyB,GAQjC;;AATD,AAEE,CAFD,CAEG,KAAK,EAFN,KAAK,AAAA,MAAM,CAAC,CAAC,CAEZ,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,EAChC,eAAe,EAAE,SAAS,GAC3B;;AALH,AAME,CAND,CAMG,KAAK,CAAC,KAAK,EANZ,KAAK,AAAA,MAAM,CAAC,CAAC,CAMZ,KAAK,CAAC,KAAK,CAAC,EACZ,KAAK,EAAE,IAAI,GACZ;;AAKH,AAAA,SAAS,CAAC,EACR,aAAa,EAAE,IAAI,EACnB,UAAU,EAAE,MAAM,GACnB;;AAID,AAAA,QAAQ,CAAC,EACP,SAAS,EAAE,QAAQ,EACnB,UAAU,EAAE,OAAO,EACnB,aAAa,EAAE,MAAM,EACrB,UAAU,EAAE,MAAM,GACnB;;AAGD,AAAA,SAAS,EAAE,gBAAgB,CAAC,EAC1B,KAAK,EAAE,yBAAyB,GACjC;;AAID,AAAA,QAAQ,CAAC,EACP,WAAW,EAAE,IAAI,EACjB,KAAK,EAAE,IAAI,GAYZ;;AAdD,AAIE,QAJM,CAIN,QAAQ,CAAC,EACP,aAAa,EAAE,GAAG,EAClB,UAAU,EAAE,GAAG,EACf,WAAW,EAAE,SAAS,EACtB,SAAS,EAAE,MAAM,GAKlB;;AAbH,AASI,QATI,CAIN,QAAQ,CAKN,CAAC,CAAC,EACA,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,CAAC,GACV;;AAIL,MAAM,mBACJ,GAAA,AAAA,QAAQ,CAAC,EACP,KAAK,EAAE,GAAG,GAIX,CALD,AAGI,QAHI,CAEN,QAAQ,CACN,CAAC,CAAC,EAAE,OAAO,EAAE,KAAK,GAAI,EAEzB;;AAGH,AAAA,iBAAiB,CAAC,EAChB,aAAa,EAAE,IAAI,EACnB,SAAS,EAAE,QAAQ,GAQpB;;AAVD,AAGE,iBAHe,CAGf,CAAC,CAAC,EACA,KAAK,EAAE,OAAO,GAKf;;AATH,AAKI,iBALa,CAGf,CAAC,CAEG,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,EAChC,eAAe,EAAE,IAAI,GACtB;;AAOL,AAAA,OAAO,CAAC,EACN,UAAU,EAAE,IAAI,EAChB,aAAa,EAAE,GAAG,CAAC,KAAK,CH7DJ,OAAO,EG8D3B,gBAAgB,EAAE,sBAAsB,EACxC,OAAO,EAAE,IAAI,GACd;;AACD,AAEI,OAFG,AAAA,aAAa,CAClB,CAAC,CACG,KAAK,CAAC,EACN,eAAe,EAAE,IAAI,GACtB;;AAJL,AAME,OANK,AAAA,aAAa,CAMlB,aAAa,CAAC,EACZ,KAAK,EAAE,wBAAwB,GAChC;;AARH,AASE,OATK,AAAA,aAAa,CASlB,WAAW,CAAC,SAAS,CAAC,SAAS,CAAC,EAC9B,KAAK,EAAE,wBAAwB,GAIhC;;AAdH,AAWI,OAXG,AAAA,aAAa,CASlB,WAAW,CAAC,SAAS,CAAC,SAAS,CAE3B,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,GACjC;;AAbL,AAeE,OAfK,AAAA,aAAa,CAelB,WAAW,CAAC,SAAS,AAAA,OAAO,GAAC,SAAS,CAAC,EACnC,gBAAgB,EAAE,OAAO,EACzB,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,yBAAyB,GAInC;;AAtBH,AAmBM,OAnBC,AAAA,aAAa,CAelB,WAAW,CAAC,SAAS,AAAA,OAAO,GAAC,SAAS,CAIhC,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,GACjC;;AArBP,AAuBE,OAvBK,AAAA,aAAa,CAuBlB,aAAa,AAAA,OAAO,CAAC,EACnB,cAAc,EAAE,CAAC,EACjB,WAAW,EAAE,CAAC,EACd,SAAS,EAAE,MAAM,GAYlB;;AAtCH,AA4BM,OA5BC,AAAA,aAAa,CAuBlB,aAAa,AAAA
,OAAO,CAIlB,CAAC,CACC,CAAC,EAAE,MAAM,CAAC,EACR,KAAK,EAAE,wBAAwB,EAC/B,kBAAkB,EAAE,oBAAoB,GACzC;;AA/BP,AAiCQ,OAjCD,AAAA,aAAa,CAuBlB,aAAa,AAAA,OAAO,CAIlB,CAAC,CAKG,KAAK,CACL,CAAC,EAAE,MAAM,CAAC,EACR,KAAK,EAAE,yBAAyB,GACjC;;AAMT,AACE,eADa,CACb,SAAS,CAAC,EACR,OAAO,EAAE,KAAK,EACd,KAAK,EAAE,IAAI,EACX,MAAM,EAAE,GAAG,EACX,gBAAgB,EAAE,wBAAwB,EAC1C,aAAa,EAAE,GAAG,EAClB,aAAa,EAAE,GAAG,EAClB,UAAU,EAAE,QAAQ,GACrB;;AATH,AAUE,eAVa,CAUb,QAAQ,CAAC,EACP,SAAS,EAAE,aAAa,EACxB,gBAAgB,EAAE,OAAO,GAC1B;;AAbH,AAcE,eAda,CAcb,WAAW,CAAC,EACV,OAAO,EAAE,CAAC,GACX;;AAhBH,AAiBE,eAjBa,CAiBb,WAAW,CAAC,EACV,SAAS,EAAE,cAAc,EACzB,gBAAgB,EAAE,OAAO,GAC1B;;AAGH,AACE,eADa,AAAA,UAAU,CACvB,QAAQ,CAAC,EACP,SAAS,EAAE,SAAS,GACrB;;AAHH,AAIE,eAJa,AAAA,UAAU,CAIvB,WAAW,CAAC,EACV,OAAO,EAAE,CAAC,GACX;;AANH,AAOE,eAPa,AAAA,UAAU,CAOvB,WAAW,CAAC,EACV,SAAS,EAAE,SAAS,GACrB;;AAMH,AAAA,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,EACb,SAAS,EAAE,IAAI,EACf,KAAK,EAAE,wBAAwB,GAChC;;AAED,AAAA,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,EACb,KAAK,EAAE,wBAAwB,GAChC;;AAID,AAAA,OAAO,CAAC,EACN,UAAU,EAAE,MAAM,GAkBnB;;AAnBD,AAEE,OAFK,CAEL,cAAc,CAAC,EACb,SAAS,EAAE,IAAI,GAYhB;;AAfH,AAKM,OALC,CAEL,cAAc,CAEZ,CAAC,CACC,CAAC,EAAE,MAAM,CAAC,EACR,KAAK,EAAE,wBAAwB,EAC/B,kBAAkB,EAAE,oBAAoB,GACzC;;AARP,AAUQ,OAVD,CAEL,cAAc,CAEZ,CAAC,CAKG,KAAK,CACL,CAAC,EAAE,MAAM,CAAC,EACR,KAAK,EAAE,yBAAyB,GACjC;;AAZT,AAgBE,OAhBK,CAgBL,aAAa,CAAC,EACZ,SAAS,EAAE,MAAM,GAClB;;AAKH,AAAA,MAAM,AAAA,aAAa,CAAC,EAClB,gBAAgB,EAAE,6BAA6B,EAC/C,SAAS,EAAE,OAAO,GAanB;;AAfD,AAGE,MAHI,AAAA,aAAa,CAGjB,UAAU,CAAC,EACT,KAAK,EAAE,+BAA+B,EACtC,WAAW,EAAE,GAAG,EAChB,cAAc,EAAE,GAAG,GACpB;;AAPH,AAQE,MARI,AAAA,aAAa,CAQjB,CAAC,CAAC,EACA,KAAK,EAAE,+BAA+B,GAKvC;;AAdH,AAUI,MAVE,AAAA,aAAa,CAQjB,CAAC,CAEG,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,EAChC,eAAe,EAAE,IAAI,GACtB;;AAIL,AAAA,MAAM,AAAA,cAAc,CAAC,EACnB,UAAU,EAAE,GAAG,CAAC,KAAK,CHpMD,OAAO,EGqM3B,WAAW,EAAE,IAAI,EACjB,cAAc,EAAE,IAAI,EACpB,SAAS,EAAE,MAAM,GAClB;;AAKD,AAAA,WAAW,CAAC,EACV,aAAa,EAAE,GAAG,CAAC,KAAK,CH9MJ,OAAO,EG+M3B,UAAU,EAAE,MAAM,EAClB,WAAW,EAAE,IAAI,EACjB,cAAc,EAAE,IAAI,GAKrB;;AATD,AAKE,WALS,CAKT,EAAE,CAAC,EACD,KAAK,EAAE,yBAAyB,EAChC,SAAS,EAAE,IAAI,GAChB;;AAGH,AAAA,UAAU,CAAC,EACT,MAAM,EAAE,CAAC,EACT,aAAa,EAAE,IAAI,EACnB,OAAO,EAAE,CAAC,GAmBX;;AAtBD,AAIE,UAJQ,CAIR,EAAE,CAAC,EACD,aAAa,EAAE,GAAG,CAAC,KAAK,CH7NN,OAAO,EG8NzB,UAAU,EAAE,IAAI,EAChB,WAAW,EAAE,IAAI,EACjB,cAAc,EAAE,IAAI,GAarB;;AArBH,AASI,UATM,CAIR,EAAE,CAKA,UAAU,CAAC,EACT,KAAK,EAAE,8BAA8B,EACrC,SAAS,EAAE,QAAQ,EACnB,aAAa,EAAE,CAAC,GACjB;;AAbL,AAcI,UAdM,CAIR,EAAE,CAUA,CAAC,CAAC,EACA,KAAK,EAAE,wBAAwB,EAC/B,eAAe,EAAE,IAAI,GAItB;;AApBL,AAiBM,UAjBI,CAIR,EAAE,CAUA,CAAC,CAGG,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,GACjC;;AAKP,AAEI,WAFO,CACT,UAAU,CACR,UAAU,CAAC,EACT,KAAK,EAAE,wBAAwB,GAIhC;;AAPL,AAIM,WAJK,CACT,UAAU,CACR,UAAU,CAEN,KAAK,CAAC,EACN,KAAK,EHhPC,OAAO,GGiPd;;AANP,AAQI,WARO,CACT,UAAU,AAOP,OAAO,CAAC,UAAU,CAAC,EAClB,KAAK,EHrPG,OAAO,EGsPf,gBAAgB,EAAE,yBAAyB,GAI5C;;AAdL,AAWM,WAXK,CACT,UAAU,AAOP,OAAO,CAAC,UAAU,CAGf,KAAK,CAAC,EACN,gBAAgB,EAAE,yBAAyB,GAC5C;;AAQP,AACE,QADM,CACN,CAAC,CAAC,KAAK,CAAC,EACN,mBAAmB,EAAE,yBAAyB,EAC9C,eAAe,EAAE,IAAI,GACtB;;AAMH,AACE,SADO,CACP,UAAU,CAAC,EACT,KAAK,EAAE,IAAI,EACX,aAAa,EAAE,IAAI,GAgBpB;;AAnBH,AAKI,SALK,CACP,UAAU,CAIR,CAAC,CAAC,EACA,eAAe,EAAE,IAAI,GACtB;;AAPL,AASI,SATK,CACP,UAAU,CAQR,IAAI,CAAC,EACH,OAAO,EAAE,IAAI,EACb,WAAW,EAAE,MAAM,GACpB;;AAZL,AAeM,SAfG,CACP,UAAU,CAaR,KAAK,CACH,GAAG,CAAC,EACF,KAAK,EAAE,IAAI,GACZ;;AAjBP,AAqBE,SArBO,CAqBP,UAAU,CAAC,EACT,KAAK,EAAE,KAAK,EACZ,aAAa,EAAE,IAAI,GAmBpB;;AA1CH,AAyBI,SAzBK,CAqBP,UAAU,CAIR,CAAC,CAAC,EACA,KAAK,EAAE,KAAK,EACZ,eAAe,EAAE,IAAI,GAKtB;;AAhCL,AA6BM,SA7BG,CAqBP,UAAU,CAIR,CAAC
,CAIG,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,GACjC;;AA/BP,AAmCM,SAnCG,CAqBP,UAAU,CAaR,KAAK,CACH,GAAG,CAAC,EACF,KAAK,EAAE,IAAI,GACZ;;AArCP,AAsCM,SAtCG,CAqBP,UAAU,CAaR,KAAK,CAIH,WAAW,CAAC,EACV,KAAK,EHjTC,OAAO,GGkTd;;AAxCP,AA4CE,SA5CO,CA4CP,EAAE,AAAA,SAAS,CAAC,EACV,KAAK,EH5Ta,OAAO,EG6TzB,aAAa,EAAE,GAAG,CAAC,KAAK,CH7TN,OAAO,EG8TzB,WAAW,EAAE,MAAM,EACnB,UAAU,EAAE,IAAI,EAChB,aAAa,EAAE,IAAI,EACnB,UAAU,EAAE,KAAK,GAClB;;AAMH,AAAA,aAAa,CAAC,EACZ,UAAU,EAAE,IAAI,GAoHjB;;AArHD,AAEE,aAFW,CAEX,EAAE,CAAC,EACD,KAAK,EAAE,yBAAyB,EAChC,SAAS,EAAE,IAAI,EACf,UAAU,EAAE,MAAM,EAClB,UAAU,EAAE,GAAG,EACf,aAAa,EAAE,GAAG,GACnB;;AARH,AASE,aATW,CASX,EAAE,CAAC,EACD,aAAa,EAAE,IAAI,GAIpB;;AAdH,AAWI,aAXS,CASX,EAAE,CAEA,IAAI,CAAC,EACH,SAAS,EAAE,MAAM,GAClB;;AAbL,AAeE,aAfW,CAeX,EAAE,AAAA,KAAK,CAAC,EACN,KAAK,EHxVa,OAAO,EGyVzB,UAAU,EAAE,GAAG,CAAC,KAAK,CHzVH,OAAO,EG0VzB,WAAW,EAAE,IAAI,EACjB,UAAU,EAAE,IAAI,EAChB,aAAa,EAAE,KAAK,EACpB,UAAU,EAAE,KAAK,GAClB;;AAtBH,AAuBE,aAvBW,CAuBX,EAAE,AAAA,aAAa,CAAC,EACd,UAAU,EAAE,IAAI,EAChB,OAAO,EAAE,CAAC,EACV,UAAU,EAAE,CAAC,GA0Fd;;AApHH,AA4BI,aA5BS,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAAC,EACD,aAAa,EAAE,IAAI,GAsFpB;;AAnHL,AA8BM,aA9BO,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAEA,KAAK,CAAC,EACJ,MAAM,EAAE,IAAI,EACZ,aAAa,EAAE,MAAM,GAiBtB;;AAjDP,AAiCQ,aAjCK,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAEA,KAAK,CAGH,IAAI,CAAC,EACH,OAAO,EAAE,YAAY,EACrB,gBAAgB,EAAE,yBAAyB,EAC3C,YAAY,EAAE,IAAI,EAClB,aAAa,EAAE,IAAI,GAOpB;;AA5CT,AAsCU,aAtCG,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAEA,KAAK,CAGH,IAAI,CAKF,CAAC,CAAC,EACA,KAAK,EAAE,KAAK,GAIb;;AA3CX,AAwCY,aAxCC,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAEA,KAAK,CAGH,IAAI,CAKF,CAAC,CAEG,KAAK,CAAC,EACN,eAAe,EAAE,IAAI,GACtB;;AA1Cb,AA6CQ,aA7CK,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAEA,KAAK,CAeH,MAAM,CAAC,EACL,KAAK,EAAE,yBAAyB,CAAC,UAAU,EAC3C,MAAM,EAAE,GAAG,CAAC,KAAK,CAAC,yBAAyB,GAC5C;;AAhDT,AAkDM,aAlDO,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAsBA,MAAM,CAAC,EACL,WAAW,EAAE,MAAM,GACpB;;AApDP,AAsDQ,aAtDK,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAyBA,OAAO,CACL,CAAC,CAAC,EACA,aAAa,EAAE,GAAG,CAAC,MAAM,CAAC,yBAAyB,GAKpD;;AA5DT,AAwDU,aAxDG,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAyBA,OAAO,CACL,CAAC,CAEG,KAAK,CAAC,EACJ,mBAAmB,EAAE,KAAK,EAC1B,eAAe,EAAE,IAAI,GACxB;;AA3DX,AA6DQ,aA7DK,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAyBA,OAAO,GAQH,EAAE,CAAC,EACH,aAAa,EAAE,SAAS,EACxB,UAAU,EAAE,MAAM,GACnB;;AAhET,AAmEQ,aAnEK,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAsCA,MAAM,CACJ,CAAC,AAAA,IAAI,CAAC,EACJ,KAAK,EAAE,wBAAwB,EAC/B,MAAM,EAAE,GAAG,CAAC,KAAK,CAAC,wBAAwB,EAC1C,YAAY,EAAE,IAAI,EAClB,aAAa,EAAE,IAAI,EACnB,WAAW,EAAE,OAAO,EACpB,cAAc,EAAE,OAAO,GAKxB;;AA9ET,AA0EU,aA1EG,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAsCA,MAAM,CACJ,CAAC,AAAA,IAAI,CAOD,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,EAChC,YAAY,EAAE,yBAAyB,GACxC;;AA7EX,AAgFM,aAhFO,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAoDA,OAAO,CAAC,EACN,SAAS,EAAE,QAAQ,EACnB,UAAU,EAAE,GAAG,EACf,QAAQ,EAAE,MAAM,EAChB,UAAU,EAAE,OAAO,EACnB,kBAAkB,EAAE,UAAU,EAC9B,eAAe,EAAE,UAAU,EAC3B,cAAc,EAAE,UAAU,EAC1B,aAAa,EAAE,UAAU,EACzB,UAAU,EAAE,cAAc,GAW3B;;AApGP,AA2FQ,aA3FK,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAoDA,OAAO,CAWL,CAAC,CAAC,EACA,WAAW,EAAE,KAAK,EAClB,MAAM,EAAE,IAAI,GACb;;AA9FT,AA+FQ,aA/FK,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAoDA,OAAO,CAeL,GAAG,CAAC,EACF,SAAS,EAAE,GAAG,EACd,WAAW,EAAE,KAAK,EAClB,OAAO,EAAE,IAAI,GACd;;AAnGT,AAqGM,aArGO,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAyEA,OAAO,AAAA,KAAK,CAAC,EACX,UAAU,EAAE,KAAK,EACjB,kBAAkB,EAAE,UAAU,EAC9B,eAAe,EAAE,UAAU,EAC3B,cAAc,EAAE,UAAU,EAC1B,aAAa,EAAE,UAAU,EACzB,UAAU,EAAE,cAAc,GAC3B;;AA5GP,AA6GM,aA7GO,CAuBX,EAAE,AAAA,aAAa,CAKb,EAAE,CAiFA,GAAG,AAAA,SAAS,AAAA,OAAO,CAAC,EAClB,MAAM,EAAE,MAAM,CAAC,GAAG,CAAC,sBAAsB,GAC1C;;AA/GP,AAgHM,aAhHO,CAuBX,EAAE,AAAA,aAAa,CAKb,EA
AE,CAoFA,GAAG,AAAA,SAAS,AAAA,OAAO,AAAA,KAAK,CAAC,EACvB,YAAY,EAAE,wBAAwB,GACvC;;AAMP,AAAA,MAAM,AAAA,UAAU,CAAC,EACf,MAAM,EAAE,QAAQ,GACjB;;AAED,AAAA,GAAG,CAAC,EACF,KAAK,EAAE,yBAAyB,EAChC,gBAAgB,EAAE,2BAA2B,EAC7C,aAAa,EAAE,GAAG,EAClB,OAAO,EAAE,QAAQ,GAMlB;;AAVD,AAKE,GALC,CAKD,GAAG,EALL,GAAG,CAKI,IAAI,CAAC,EACR,gBAAgB,EAAE,WAAW,EAC7B,aAAa,EAAE,CAAC,EAChB,OAAO,EAAE,CAAC,GACX;;AAGH,AAAA,IAAI,CAAC,EACH,KAAK,EAAE,yBAAyB,EAChC,gBAAgB,EAAE,2BAA2B,EAC7C,aAAa,EAAE,GAAG,EAClB,OAAO,EAAE,OAAO,GACjB;;AAID,AAAA,IAAI,AAAA,WAAW,EACf,IAAI,AAAA,WAAW,CAAC,CAAC,EACjB,IAAI,AAAA,WAAW,CAAC,CAAC,CAAC,MAAM,EACxB,IAAI,AAAA,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,EACtB,UAAU,EAAE,oBAAoB,EAChC,gBAAgB,EAAE,YAAY,GAC/B;;ACxfD,2HAEgF;AAEhF,AAAA,QAAQ,CAAC,EACP,gBAAgB,EJoBI,OAAO,CIpBS,UAAU,GAC/C;;AAED,AAAA,QAAQ,CAAC,EAAE,CAAC,EACV,KAAK,EAAE,wBAAwB,CAAC,UAAU,GAC3C;;AAED,AAAA,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,QAAQ,CAAC,CAAC,CAAC,EAC/B,KAAK,EAAE,wBAAwB,CAAC,UAAU,GAI3C;;AALD,AAEE,QAFM,CAAC,CAAC,CAEN,KAAK,EAFG,SAAS,CAAC,QAAQ,CAAC,CAAC,CAE5B,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,CAAC,UAAU,GAC5C;;AAGH,AAAA,SAAS,CAAC,EACR,gBAAgB,EAAE,kBAAkB,GAUrC;;AAXD,AAEE,SAFO,CAEP,CAAC,EAFH,SAAS,CAEJ,CAAC,EAFN,SAAS,CAED,EAAE,EAFV,SAAS,CAEG,EAAE,EAFd,SAAS,CAEO,EAAE,EAFlB,SAAS,CAEW,EAAE,EAFtB,SAAS,CAEe,EAAE,EAF1B,SAAS,CAEmB,EAAE,CAAC,EAC3B,KAAK,EAAE,wBAAwB,CAAC,UAAU,GAC3C;;AAJH,AAKE,SALO,CAKP,CAAC,EALH,SAAS,CAKJ,EAAE,EALP,SAAS,CAKA,EAAE,EALX,SAAS,CAKI,EAAE,CAAC,EACZ,mBAAmB,EJAD,OAAO,CIAc,UAAU,GAClD;;AAPH,AAQE,SARO,CAQP,CAAC,CAAC,KAAK,CAAC,EACN,mBAAmB,EAAE,yBAAyB,CAAC,UAAU,GAC1D;;AAGH,AAAA,UAAU,CAAC,EACT,gBAAgB,EJRI,OAAO,CIQS,UAAU,EAC9C,KAAK,EAAE,+BAA+B,CAAC,UAAU,GAUlD;;AAZD,AAGE,UAHQ,CAGR,EAAE,EAHJ,UAAU,CAGJ,EAAE,EAHR,UAAU,CAGA,IAAI,CAAC,EACX,KAAK,EAAE,+BAA+B,CAAC,UAAU,GAClD;;AALH,AAME,UANQ,CAMR,CAAC,EANH,UAAU,CAML,CAAC,AAAA,kBAAkB,CAAC,EACrB,KAAK,EAAE,+BAA+B,CAAC,UAAU,GAIlD;;AAXH,AAQI,UARM,CAMR,CAAC,CAEG,KAAK,EARX,UAAU,CAML,CAAC,AAAA,kBAAkB,CAElB,KAAK,CAAC,EACN,KAAK,EAAE,yBAAyB,CAAC,UAAU,GAC5C" diff --git a/assets/bibliography/2018-12-22-distill.bib b/assets/bibliography/2018-12-22-distill.bib deleted file mode 100644 index 2b06f3c9..00000000 --- a/assets/bibliography/2018-12-22-distill.bib +++ /dev/null @@ -1,7 +0,0 @@ -@article{gregor2015draw, - title={DRAW: A recurrent neural network for image generation}, - author={Gregor, Karol and Danihelka, Ivo and Graves, Alex and Rezende, Danilo Jimenez and Wierstra, Daan}, - journal={arXiv preprint, arXiv:1502.04623}, - year={2015}, - url={https://arxiv.org/pdf/1502.04623.pdf} -} diff --git a/assets/bibliography/2023-09-24-foundations.bib b/assets/bibliography/2023-09-24-foundations.bib new file mode 100644 index 00000000..9d877a66 --- /dev/null +++ b/assets/bibliography/2023-09-24-foundations.bib @@ -0,0 +1,102 @@ +@article{gregor2015draw, + title={DRAW: A recurrent neural network for image generation}, + author={Gregor, Karol and Danihelka, Ivo and Graves, Alex and Rezende, Danilo Jimenez and Wierstra, Daan}, + journal={arXiv preprint, arXiv:1502.04623}, + year={2015}, + url={https://arxiv.org/pdf/1502.04623.pdf} +} + + +@InProceedings{pmlr-v202-radford23a, + title = {Robust Speech Recognition via Large-Scale Weak Supervision}, + author = {Radford, Alec and Kim, Jong Wook and Xu, Tao and Brockman, Greg and Mcleavey, Christine and Sutskever, Ilya}, + booktitle = {Proceedings of the 40th International Conference on Machine Learning}, + pages = {28492--28518}, + year = {2023}, + editor = {Krause, Andreas and Brunskill, Emma and Cho, Kyunghyun and Engelhardt, Barbara and Sabato, Sivan and Scarlett, Jonathan}, + volume = {202}, + 
series = {Proceedings of Machine Learning Research}, + month = {23--29 Jul}, + publisher = {PMLR}, + pdf = {https://proceedings.mlr.press/v202/radford23a/radford23a.pdf}, + url = {https://proceedings.mlr.press/v202/radford23a.html}, + abstract = {We study the capabilities of speech processing systems trained simply to predict large amounts of transcripts of audio on the internet. When scaled to 680,000 hours of multilingual and multitask supervision, the resulting models generalize well to standard benchmarks and are often competitive with prior fully supervised results without the need for any dataset specific fine-tuning. When compared to humans, the models approach their accuracy and robustness. We are releasing models and inference code to serve as a foundation for further work on robust speech processing.} +} + +@inproceedings{liUniversal, + author={Li, Xinjian and Dalmia, Siddharth and Li, Juncheng and Lee, Matthew and Littell, Patrick and Yao, Jiali and Anastasopoulos, Antonios and Mortensen, David R. and Neubig, Graham and Black, Alan W and Metze, Florian}, + booktitle={ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, + title={Universal Phone Recognition with a Multilingual Allophone System}, + year={2020}, + pages={8249--8253}, + doi={10.1109/ICASSP40776.2020.9054362}} + +@article{zhang2023google, + title={Google USM: Scaling automatic speech recognition beyond 100 languages}, + author={Zhang, Yu and Han, Wei and Qin, James and Wang, Yongqiang and Bapna, Ankur and Chen, Zhehuai and Chen, Nanxin and Li, Bo and Axelrod, Vera and Wang, Gary and others}, + journal={arXiv preprint arXiv:2303.01037}, + year={2023} +} + +@article{barrault2023seamlessm4t, + title={SeamlessM4T-Massively Multilingual \& Multimodal Machine Translation}, + author={Barrault, Lo{\"\i}c and Chung, Yu-An and Meglioli, Mariano Cora and Dale, David and Dong, Ning and Duquenne, Paul-Ambroise and Elsahar, Hady and Gong, Hongyu and Heffernan, Kevin and Hoffman, John and others}, + journal={arXiv preprint arXiv:2308.11596}, + year={2023} +} + +@article{pratap2023scaling, + title={Scaling speech technology to 1,000+ languages}, + author={Pratap, Vineel and Tjandra, Andros and Shi, Bowen and Tomasello, Paden and Babu, Arun and Kundu, Sayani and Elkahky, Ali and Ni, Zhaoheng and Vyas, Apoorv and Fazel-Zarandi, Maryam and others}, + journal={arXiv preprint arXiv:2305.13516}, + year={2023} +} + +@inproceedings{peng23d_interspeech, + author={Puyuan Peng and Brian Yan and Shinji Watanabe and David Harwath}, + title={Prompting the Hidden Talent of Web-Scale Speech Models for Zero-Shot Task Generalization}, + year={2023}, + booktitle={Proc. 
INTERSPEECH 2023}, + pages={396--400}, + doi={10.21437/Interspeech.2023-2032} +} + +@article{shi2023ml, + title={ML-SUPERB: Multilingual Speech Universal PERformance Benchmark}, + author={Shi, Jiatong and Berrebbi, Dan and Chen, William and Chung, Ho-Lam and Hu, En-Pei and Huang, Wei Ping and Chang, Xuankai and Li, Shang-Wen and Mohamed, Abdelrahman and Lee, Hung-yi and others}, + journal={arXiv preprint arXiv:2305.10615}, + year={2023} +} + +@article{chen2022wavlm, + title={WavLM: Large-scale self-supervised pre-training for full stack speech processing}, + author={Chen, Sanyuan and Wang, Chengyi and Chen, Zhengyang and Wu, Yu and Liu, Shujie and Chen, Zhuo and Li, Jinyu and Kanda, Naoyuki and Yoshioka, Takuya and Xiao, Xiong and others}, + journal={IEEE Journal of Selected Topics in Signal Processing}, + volume={16}, + number={6}, + pages={1505--1518}, + year={2022}, + publisher={IEEE} +} + +@article{hsu2021hubert, + title={HuBERT: Self-supervised speech representation learning by masked prediction of hidden units}, + author={Hsu, Wei-Ning and Bolte, Benjamin and Tsai, Yao-Hung Hubert and Lakhotia, Kushal and Salakhutdinov, Ruslan and Mohamed, Abdelrahman}, + journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing}, + volume={29}, + pages={3451--3460}, + year={2021}, + publisher={IEEE} +} diff --git a/assets/img/blog/mlsuperb_map.png b/assets/img/blog/mlsuperb_map.png new file mode 100644 index 00000000..93fbe406 Binary files /dev/null and b/assets/img/blog/mlsuperb_map.png differ diff --git a/assets/img/blog/wavlablm.png b/assets/img/blog/wavlablm.png new file mode 100644 index 00000000..162637e9 Binary files /dev/null and b/assets/img/blog/wavlablm.png differ diff --git a/assets/img/blog/yodas_chars.png b/assets/img/blog/yodas_chars.png new file mode 100644 index 00000000..6629a0e3 Binary files /dev/null and b/assets/img/blog/yodas_chars.png differ diff --git a/assets/img/blog/yodas_langs.png b/assets/img/blog/yodas_langs.png new file mode 100644 index 00000000..065145cc Binary files /dev/null and b/assets/img/blog/yodas_langs.png differ diff --git a/assets/img/samuele.jpg b/assets/img/samuele.jpg new file mode 100644 index 00000000..9a8c1602 Binary files /dev/null and b/assets/img/samuele.jpg differ diff --git a/assets/js/distillpub/overrides.js b/assets/js/distillpub/overrides.js new file mode 100644 index 00000000..2d839626 --- /dev/null +++ b/assets/js/distillpub/overrides.js @@ -0,0 +1,24 @@ +$(document).ready(function() { + // Override styles of the footnotes. + document.querySelectorAll("d-footnote").forEach(function(footnote) { + footnote.shadowRoot.querySelector("sup > span") + .setAttribute("style", "color: var(--global-theme-color);"); + footnote.shadowRoot.querySelector("d-hover-box").shadowRoot.querySelector("style").sheet + .insertRule(".panel {background-color: var(--global-bg-color) !important;}"); + footnote.shadowRoot.querySelector("d-hover-box").shadowRoot.querySelector("style").sheet + .insertRule(".panel {border-color: var(--global-divider-color) !important;}"); + }); + // Override styles of the citations. 
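+  // (These rules are injected into each component's shadow root because distill's d-footnote and d-cite render inside a shadow DOM, where the theme's global stylesheet and its CSS variables cannot reach.)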
+ document.querySelectorAll("d-cite").forEach(function(cite) { + cite.shadowRoot.querySelector("div > span") + .setAttribute("style", "color: var(--global-theme-color);"); + cite.shadowRoot.querySelector("style").sheet + .insertRule("ul li a {color: var(--global-text-color) !important; text-decoration: none;}"); + cite.shadowRoot.querySelector("style").sheet + .insertRule("ul li a:hover {color: var(--global-theme-color) !important;}"); + cite.shadowRoot.querySelector("d-hover-box").shadowRoot.querySelector("style").sheet + .insertRule(".panel {background-color: var(--global-bg-color) !important;}"); + cite.shadowRoot.querySelector("d-hover-box").shadowRoot.querySelector("style").sheet + .insertRule(".panel {border-color: var(--global-divider-color) !important;}"); + }); +}) \ No newline at end of file diff --git a/post_examples/2018-12-22-distill.md b/post_examples/2018-12-22-distill.md index 9e7efd4c..e53f6cee 100644 --- a/post_examples/2018-12-22-distill.md +++ b/post_examples/2018-12-22-distill.md @@ -2,7 +2,7 @@ layout: distill title: a distill-style blog post description: an example of a distill-style blog post and main elements -date: 2021-05-22 +date: 2023-05-22 authors: - name: Albert Einstein @@ -18,7 +18,7 @@ authors: affiliations: name: IAS, Princeton -bibliography: 2018-12-22-distill.bib +bibliography: 2023-09-24-foundations.bib # Below is an example of injecting additional post-specific styles. # If you use this post as a template, delete this _styles block. @@ -40,11 +40,6 @@ _styles: > --- -**NOTE:** -Citations, footnotes, and code blocks do not display correctly in the dark mode since distill does not support the dark mode by default. -If you are interested in correctly adding dark mode support for distill, please open [a discussion](https://github.com/alshedivat/al-folio/discussions) and let us know. - - ## Equations This theme supports rendering beautiful math in inline and display modes using [MathJax 3](https://www.mathjax.org/){:target="\_blank"} engine.
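As a minimal sketch of that syntax (assuming the `$$ ... $$` delimiters that the theme's MathJax configuration conventionally accepts; the exact delimiters depend on the local setup), inline math sits directly in a sentence, e.g. $$ E = mc^2 $$, while display math goes on its own lines:

$$
\left( \sum_{k=1}^n a_k b_k \right)^2 \leq \left( \sum_{k=1}^n a_k^2 \right) \left( \sum_{k=1}^n b_k^2 \right)
$$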