diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 7af4c12cd2e3d6..c8df8daea24268 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -66,8 +66,6 @@ jobs: npm install make rebuild-search-index ACTIVATE_ENV=pwd cat metadata/swagger.yaml | python bin/yaml2json.py > api/swagger.json - curl --retry 5 https://gallantries.github.io/video-library/api/videos.json > metadata/video-library.json - curl --retry 5 https://gallantries.github.io/video-library/api/sessions.json > metadata/session-library.json rdoc bin _plugins/ --output gtn_rdoc - name: Build Site diff --git a/.github/workflows/google-form.yml b/.github/workflows/google-form.yml index 80de537d0d1ec2..030fda314e1403 100644 --- a/.github/workflows/google-form.yml +++ b/.github/workflows/google-form.yml @@ -1,4 +1,4 @@ -name: "[Cron] Update news from Google Form" +name: "[Cron] Update news and recordings from Google Form" on: workflow_dispatch: schedule: @@ -35,19 +35,57 @@ jobs: bundle pristine ffi # END Dependencies - - name: Update google news + - name: Update news from Google Form id: generate run: | echo "new_ids=$(bundle exec ruby bin/google-form-news.rb)" >> $GITHUB_OUTPUT - - name: Create Pull Request + - name: Create Pull Request for News # If it's not a Pull Request then commit any changes as a new PR. if: | github.event_name != 'pull_request' && steps.generate.outputs.new_ids != '0' uses: peter-evans/create-pull-request@v3 with: - title: Import news posts from Google Form + commit-message: Import news posts from Google Form branch-suffix: timestamp - commit-message: New News Post! + title: "[Google Form] New News Post!" add-paths: news/_posts/ + + - name: Update recordings from Google Form + id: recordings + run: | + echo "new_recordings=$(bundle exec ruby bin/google-form-recordings.rb)" >> $GITHUB_OUTPUT + + - name: Create Pull Request for Recordings + # If it's not a Pull Request then commit any changes as a new PR. + if: | + github.event_name != 'pull_request' && + steps.recordings.outputs.new_recordings == 'true' + uses: peter-evans/create-pull-request@v3 + with: + title: "[Google Form] New Recording Submission!" + branch-suffix: timestamp + commit-message: add new recording from Google Form submission + add-paths: topics/ + committer: "Saskia Hiltemann " + author: "Saskia Hiltemann " + body: | + New recording submitted! + + **Submission Process checklist** + - [x] **Instructor** Record video + - [x] **Instructor** Submit video via form + - [x] **GTN** Pull request made by bot + - [ ] **GTN** Check if adjustments to PR are necessary + - [ ] Check [Submissions Form](https://docs.google.com/spreadsheets/d/1iXjLlMEH5QMAMyUMHi1c_Lb7OiJhL_9hgJrtAsBoZ-Y/edit?usp=sharing) + - [ ] Do new contributors need to be added? + - [ ] **GTN** Upload video to [GalaxyProject YouTube](https://www.youtube.com/c/galaxyproject) + - [ ] **GTN** Add YouTube video ID to PR after upload + - [ ] **GTN** Put auto-generated captions in Google Doc ([folder](https://drive.google.com/drive/folders/1liyqDQDbxMNXvpQIaqFmoE2fB2aJIH9N?usp=drive_link)) + - [ ] **Instructor** Fix auto-generated captions, comment here when done + - [ ] **GTN** Upload fixed captions to YouTube + - [ ] **GTN** Add the people who did the captioning to this PR + - [ ] **GTN** Merge PR!
+ + diff --git a/.github/workflows/monthly-release-backfill.yml b/.github/workflows/monthly-release-backfill.yml index 60931af22e02f3..001662dbefa256 100644 --- a/.github/workflows/monthly-release-backfill.yml +++ b/.github/workflows/monthly-release-backfill.yml @@ -69,8 +69,6 @@ jobs: npm install make rebuild-search-index ACTIVATE_ENV=pwd cat metadata/swagger.yaml | python bin/yaml2json.py > api/swagger.json - curl --retry 5 https://gallantries.github.io/video-library/api/videos.json > metadata/video-library.json - curl --retry 5 https://gallantries.github.io/video-library/api/sessions.json > metadata/session-library.json JEKYLL_ENV=production bundle exec jekyll build --strict_front_matter -d _site/training-material env: SOURCE_TAG: ${{ github.event.inputs.selected_tag }} diff --git a/.github/workflows/monthly-release.yml b/.github/workflows/monthly-release.yml index ca51febd5c7ad5..54176d0a4e633e 100644 --- a/.github/workflows/monthly-release.yml +++ b/.github/workflows/monthly-release.yml @@ -71,8 +71,6 @@ jobs: npm install make rebuild-search-index ACTIVATE_ENV=pwd cat metadata/swagger.yaml | ruby bin/yaml2json.rb > api/swagger.json - curl --retry 5 https://gallantries.github.io/video-library/api/videos.json > metadata/video-library.json - curl --retry 5 https://gallantries.github.io/video-library/api/sessions.json > metadata/session-library.json JEKYLL_ENV=production bundle exec jekyll build --strict_front_matter -d _site/training-material env: SOURCE_TAG: ${{ env.release_tag }} diff --git a/CONTRIBUTORS.yaml b/CONTRIBUTORS.yaml index cfa4606213e4ab..a623e6a580fde7 100644 --- a/CONTRIBUTORS.yaml +++ b/CONTRIBUTORS.yaml @@ -43,6 +43,12 @@ contributor2: halloffame: "no" joined: 2020-06 +# dummy contributor for video library +awspolly: + halloffame: "no" + joined: 2024-05 + name: Automated Text-to-Speech + # our real contributors <3 (please add yourself in alphabetical order) a-asai: @@ -143,6 +149,12 @@ andreasrichter: github: false joined: 2017-09 +andrewr: + name: Andrew Rajczewski + affiliations: + - minnesotauni + joined: 2024-05 + annatrigos: name: Anna Trigos github: false @@ -675,6 +687,10 @@ EngyNasr: - uni-freiburg - elixir-europe +ennovytje: + name: Yvonne Hiltemann + joined: 2024-05 + ecoissac: github: false name: Eric Coissac @@ -715,10 +731,15 @@ cumbof: orcid: 0000-0003-2920-5838 joined: 2018-02 + fidelram: name: Fidel Ramirez joined: 2017-09 +FilipposZ: + name: Filippos Zacharopoulos + joined: 2024-05 + foellmelanie: name: Melanie Föll email: melanie.foell@mol-med.uni-freiburg.de @@ -1263,6 +1284,12 @@ lleroi: orcid: 0000-0003-3748-4179 joined: 2021-11 +LonsBio: + name: Andrew Lonsdale + joined: 2024-05 + affiliations: + - petermac + loraine-gueguen: name: Loraine Guéguen joined: 2017-09 @@ -1280,6 +1307,12 @@ kkamieniecka: affiliations: - elixir-europe +ksuderman: + name: Keith Suderman + joined: 2024-05 + affiliations: + - johnshopkins + poterlowicz-lab: name: Krzysztof Poterlowicz twitter: bioinfbrad @@ -1306,6 +1339,12 @@ mjostaszewski: affiliations: - elixir-europe +MariaTsayo: + name: Maria Tsagiopoulou + joined: 2024-05 + affiliations: + - inab-certh + mariipia10: name: Maria Pia joined: 2021-10 @@ -1779,6 +1818,12 @@ proccaserra: affiliations: - elixir-europe +prodromus: + name: Jonathan Trow + joined: 2024-05 + affiliations: + - ncbi + ptsefton: name: Peter Sefton orcid: 0000-0002-3545-944X @@ -1854,6 +1899,16 @@ rlibouba: - eurosciencegateway - elixir-europe +rwinand: + name: Raf Winand + joined: 2024-05 + affiliations: + - sciensano + +s3by01: + name: Sébastien 
Fouilloux + joined: 2024-05 + samanthaanjei: name: Samantha Anjei joined: 2022-04 @@ -2286,6 +2341,13 @@ yvanlebras: - fnso2019 - elixir-europe +yvesvdb: + name: Yves Vandenbrouck + joined: 2024-05 + affiliations: + - ifb + + SNG888: name: Sandra Ng email: s.ng@qmul.ac.uk diff --git a/ORGANISATIONS.yaml b/ORGANISATIONS.yaml index ca83ccda83be10..777f3aaf442a43 100644 --- a/ORGANISATIONS.yaml +++ b/ORGANISATIONS.yaml @@ -88,6 +88,11 @@ ifb: avatar: "/training-material/shared/images/ifb.png" github: false +inab-certh: + name: INAB|CERTH + url: "https://www.inab.certh.gr/" + avatar: "/training-material/shared/images/inab-certh.png" + irccs: name: IRCCS Ospedale San Raffaele url: "https://www.hsr.it/" @@ -100,6 +105,12 @@ jetstream2: avatar: https://jetstream-cloud.org/images/home/jetstream-2.png github: false +johnshopkins: + name: Johns Hopkins University + url: "https://www.jhu.edu/" + avatar: "/training-material/shared/images/hopkins.png" + github: false + linq: name: LINQ description: LINQ management GmbH @@ -107,12 +118,23 @@ linq: avatar: "/training-material/shared/images/linq.jpg" github: false +minnesotauni: + name: University of Minnesota + url: "https://twin-cities.umn.edu/" + avatar: "/training-material/shared/images/minnesotauni.png" + github: false + MPIIE: name: Max Planck Institute of Immunology and Epigenetics url: https://www.ie-freiburg.mpg.de avatar: https://raw.githubusercontent.com/bgruening/presentations/master/shared/resources/img/14_MPI_IE_logo_mit_180.gif github: false +ncbi: + name: National Center for Biotechnology Information + url: "https://www.ncbi.nlm.nih.gov/" + avatar: "/training-material/shared/images/ncbi.png" + panacea: name: PanACEA description: Pan-African Consortium for the Evaluation of Antituberculosis Antibiotics @@ -127,6 +149,11 @@ pangens: avatar: "/training-material/shared/images/pangens.jpg" github: false +petermac: + name: Peter MacCallum Cancer Centre + url: "https://www.petermac.org/" + avatar: "/training-material/shared/images/petermac.png" + pndb: name: Pôle National de Données de Biodiversité url: https://www.pndb.fr/ @@ -157,11 +184,11 @@ san-raffaele-uni: avatar: "/training-material/shared/images/uni_san_raffaele.svg" github: false -tb-capt: - name: TB-CAPT - url: https://www.tb-capt.org/ - avatar: "/training-material/shared/images/tb_capt.svg" - github: false +sciensano: + name: Sciensano + url: "https://www.sciensano.be" + avatar: "/training-material/shared/images/sciensano.jpg" + github: false swiss-tph: name: Swiss Tropical and Public Health Institute @@ -169,6 +196,12 @@ swiss-tph: avatar: "/training-material/shared/images/swiss-tph.png" github: false +tb-capt: + name: TB-CAPT + url: https://www.tb-capt.org/ + avatar: "/training-material/shared/images/tb_capt.svg" + github: false + uga: name: Université Grenoble-Alpes url: https://www.univ-grenoble-alpes.fr/ @@ -194,4 +227,3 @@ vib: github: false - diff --git a/_includes/contributor-human-icons.html b/_includes/contributor-human-icons.html index a9c65ce786c954..8ea3bc154acdfd 100644 --- a/_includes/contributor-human-icons.html +++ b/_includes/contributor-human-icons.html @@ -13,7 +13,15 @@ {% elsif include.activity == "funding" %} 💵 {% elsif include.activity == "maintainer" %} - 🧑‍🏫 + +{% elsif include.activity == "captioner" %} + 💬 +{% elsif include.activity == "speaker" %} + 🗣 +{% elsif include.activity == "organisers" %} + 🎪 +{% elsif include.activity == "instructors" %} + 🧑‍🏫 {% else %} {{ include.activity }} {% endif %} diff --git a/_includes/resource-recordings.html 
b/_includes/resource-recordings.html new file mode 100644 index 00000000000000..4accaa351f65a5 --- /dev/null +++ b/_includes/resource-recordings.html @@ -0,0 +1,56 @@ +{% assign locale = site.data.lang[page.lang] %} + +{% assign tuto_recordings = include.material.recordings | default: page.recordings %} +{% assign slides_recordings = include.material.slides_recordings %} + +{% if slides_recordings %} + {% assign slides_recordings = slides_recordings | sort: 'date' | reverse %} +{% endif %} + +{% if tuto_recordings %} + {% assign tuto_recordings = tuto_recordings | sort: 'date' | reverse %} +{% endif %} + +{% if tuto_recordings or include.material.slides_recordings %} + +{% if include.wrapper %} +
  • +{% else %} +
    +{% endif %} + + + + + + + + {% if include.wrapper %} +
  • + {% else %} + + {% endif %} + +{% endif %} diff --git a/_includes/resource-video-library.html b/_includes/resource-video-library.html deleted file mode 100644 index cccfc2fb586aa4..00000000000000 --- a/_includes/resource-video-library.html +++ /dev/null @@ -1,91 +0,0 @@ -{% assign locale = site.data.lang[page.lang] %} - -{% assign tut = include.material.url | default: page.url %} -{% assign id-tutorial = tut | remove: '/topics/' | remove: "/tutorials" | remove: '.html' %} -{% assign id-slides = id-tutorial | replace: '/tutorial', '/slides' %} -{% assign id-demo = id-tutorial | replace: '/tutorial', '/demo' %} -{% assign id-both = id-tutorial | replace: '/tutorial', '' %} - -{% assign hasvideo-tutorial = site.data['video-library'][id-tutorial] %} -{% assign hasvideo-slides = site.data['video-library'][id-slides] %} -{% assign hasvideo-demo = site.data['video-library'][id-demo] %} -{% assign hasvideo-both = site.data['video-library'][id-both]%} -{% assign hassession = site.data['session-library'][id-tutorial]%} - - {% if hasvideo-tutorial or hasvideo-slides or hasvideo-both or hasvideo-demo or hassession %} - - {% if include.wrapper %} -
  • - {% else %} -
    - {% endif %} - - - {% if include.wrapper %} -
  • - {% else %} - - {% endif %} - {% endif %} diff --git a/_includes/tutorial_list.html b/_includes/tutorial_list.html index a3df3373ac09a3..57f63d87a8d4fe 100644 --- a/_includes/tutorial_list.html +++ b/_includes/tutorial_list.html @@ -77,7 +77,7 @@
    {% include _includes/resource-slides.html material=material topic=topic.name %} {% include _includes/resource-handson.html material=material topic=topic.name %} - {% include _includes/resource-video-library.html material=material topic=topic.name %} + {% include _includes/resource-recordings.html material=material topic=topic.name %} {% if topic.type == "use" %} {% include _includes/resource-zenodo.html material=material topic=topic.name %} @@ -93,7 +93,7 @@ {% include _includes/resource-slides.html material=material topic=topic.name %} {% include _includes/resource-handson.html material=material topic=topic.name %} - {% include _includes/resource-video-library.html material=material topic=topic.name %} + {% include _includes/resource-recordings.html material=material topic=topic.name %} {% if topic.type == "use" %} {% include _includes/resource-zenodo.html material=material topic=topic.name %} diff --git a/_includes/youtube.html b/_includes/youtube.html index 7d92a648622823..c18c2eed89e6fb 100644 --- a/_includes/youtube.html +++ b/_includes/youtube.html @@ -1,4 +1,6 @@ +{% unless include.nofigure %}
    +{% endunless %} +{% unless include.nofigure %}
    Video: {{ include.title }}
    +{% endunless %} diff --git a/_layouts/base_slides.html b/_layouts/base_slides.html index 04e7eae7f6f7dd..8c404aebe405fc 100644 --- a/_layouts/base_slides.html +++ b/_layouts/base_slides.html @@ -113,12 +113,20 @@ -
    +
{% if page.video %} {% icon video-slides %} {{locale['video-slides'] | default: "Video slides"}} | {% endif %} - {% icon text-document %} {{locale['plaintext-slides'] | default: "Plain-text slides"}} +{% icon text-document %} {{locale['plaintext-slides'] | default: "Plain-text slides"}} + +{% if page.recordings %} +| {% include _includes/resource-recordings.html label="Recordings" %} +{% endif %} + + + +
    diff --git a/_layouts/contributor_index.html b/_layouts/contributor_index.html index 596fe496d47206..5ec4ab0f72fd3f 100644 --- a/_layouts/contributor_index.html +++ b/_layouts/contributor_index.html @@ -131,7 +131,11 @@

    Tutorials

    {{ res[0].title }} {% assign res_activity = res[1] %} - {% include _includes/contributor-human-icons.html activity=res_activity %} + {% if res_activity %} + {% for act in res_activity %} + {% include _includes/contributor-human-icons.html activity=act %} + {% endfor %} + {% endif %} {% endfor %} @@ -152,7 +156,11 @@

    Slides

    {{ res[0].title }} {% assign res_activity = res[1] %} - {% include _includes/contributor-human-icons.html activity=res_activity %} + {% if res_activity %} + {% for act in res_activity %} + {% include _includes/contributor-human-icons.html activity=act %} + {% endfor %} + {% endif %} {% endfor %} @@ -167,7 +175,55 @@

    Learning Pathways

    {{ res[0].title }} {% assign res_activity = res[1] %} - {% include _includes/contributor-human-icons.html activity=res_activity %} + {% if res_activity %} + {% for act in res_activity %} + {% include _includes/contributor-human-icons.html activity=act %} + {% endfor %} + {% endif %} + + {% endfor %} + + {% endif %} + + {% if page.videos_count > 0 %} +

    Video Recordings

    +
      + {% for res in page.videos %} +
    • + {% assign topic_id = res[0] | get_topic %} + {% assign topic = site.data[topic_id] %} + + {{ topic.title }} + + / + + {{ res[0].title }} + + {% assign res_activity = res[1] %} + {% if res_activity %} + {% for act in res_activity %} + {% include _includes/contributor-human-icons.html activity=act %} + {% endfor %} + {% endif %} +
    • + {% endfor %} +
    + {% endif %} + + {% if page.events_count > 0 %} +

    Events

    +
      + {% for res in page.events %} +
    • + + {{ res[0].title }} + + {% assign res_activity = res[1] %} + {% if res_activity %} + {% for act in res_activity %} + {% include _includes/contributor-human-icons.html activity=act %} + {% endfor %} + {% endif %}
    • {% endfor %}
    @@ -175,15 +231,13 @@

    Learning Pathways

    {% if page.news_count > 0 %} -

    News

    -
    - {% assign news_sorted = page.news | sort: 'date' | reverse %} - {% for n in news_sorted %} - {% include _includes/news-card.html news=n %} - {% endfor %} -
    +

    News

    +
    + {% for n in page.news %} + {% include _includes/news-card.html news=n %} + {% endfor %} +
    {% endif %} -
    diff --git a/_layouts/recordings.html b/_layouts/recordings.html new file mode 100644 index 00000000000000..8ea12e2dbe463c --- /dev/null +++ b/_layouts/recordings.html @@ -0,0 +1,179 @@ +--- +layout: base +--- + +{% comment %} + These variables are set in _plugins/generator-recordings.rb +{% endcomment %} + +{% assign material = page.material %} + +
    +

    Recordings - {{ material.title }}

    + +
    +
    +

    This is a collection of recordings from various training events where the {{ material.title }} tutorial was taught by members of the GTN community. The tutorial may have changed after the recording was made; below each video you will find a link to the tutorial as it appeared at the time of recording.

    + +

Want to add your own recording? We would love to add it to our library!

    + + +
    + +
    + GTN video library logo +
    + +
+ + {% if material.recordings %} + {% assign recordings = material.recordings %} + {% if material.slides_recordings %} + {% assign recordings = recordings | concat: material.slides_recordings %} + {% endif %} + {% elsif material.slides_recordings %} + {% assign recordings = material.slides_recordings %} + {% endif %} + + {% if recordings %} + {% assign recordings = recordings | uniq | sort: 'date' | reverse %} + + + + {% for recording in recordings %} + + {% if material.slides_recordings contains recording %} + {% assign default_type = "Lecture" %} + {% else %} + {% assign default_type = "Tutorial" %} + {% endif %} + {% assign recording-type = recording.type | default: default_type %}

{{ recording-type }} {% if recording.youtube_id contains 'videoseries' %}Playlist{% else %}Recording{% endif %} - {{ recording.date | date: "%-d %B %Y" }}

    +
    + +
+ {% include _includes/youtube.html id=recording.youtube_id width="100%" height="100%" nofigure=true %} +
    + + + +
    +

    + + {% endfor %} + +{% endif %} +
    + + diff --git a/_layouts/tutorial_hands_on.html b/_layouts/tutorial_hands_on.html index 6ec6ff4573ccc7..d1fb768c37c191 100644 --- a/_layouts/tutorial_hands_on.html +++ b/_layouts/tutorial_hands_on.html @@ -147,7 +147,7 @@

    Under Development!

    {% endif %} - {% include _includes/resource-video-library.html label=true wrapper=true %} + {% include _includes/resource-recordings.html label=true wrapper=true %} {% if tuto_has_docker %} @@ -512,7 +512,7 @@

    {{locale['references']| default: "Funding" }}

      {% for feed in feedback[1] %}
    • - {{ feed.rating | to_stars }}: + {{ feed.rating | to_stars }}: {% if feed.pro %}Liked: {{ feed.pro }}{% endif %} {% if feed.con %}Disliked: {{ feed.con }}{% endif %}
    • diff --git a/_plugins/api.rb b/_plugins/api.rb index a75d9c4a91d77c..f838af9b316bcd 100644 --- a/_plugins/api.rb +++ b/_plugins/api.rb @@ -386,6 +386,9 @@ def generate(site) slides_data = site.pages.select { |p2| p2.url == "/#{directory}/slides.html" }[0] p.update(slides_data.data) if slides_data&.data + if !Dir.exist?(File.dirname(path)) + FileUtils.mkdir_p(File.dirname(path)) + end File.write(path, JSON.generate(p)) end @@ -394,6 +397,9 @@ def generate(site) p = material.dup p.delete('ref') p['contributors'] = Gtn::Contributors.get_contributors(p).dup.map { |c| mapContributor(site, c) } + if !Dir.exist?(File.dirname(path)) + FileUtils.mkdir_p(File.dirname(path)) + end File.write(path, JSON.generate(p)) end end diff --git a/_plugins/author-page.rb b/_plugins/author-page.rb index 87d9997f70ea41..14a49a63b3dd13 100644 --- a/_plugins/author-page.rb +++ b/_plugins/author-page.rb @@ -59,6 +59,8 @@ def generate(site) learning_pathways_by_author = Hash.new { |hash, key| hash[key] = [] } slides_by_author = Hash.new { |hash, key| hash[key] = [] } news_by_author = Hash.new { |hash, key| hash[key] = [] } + events_by_author = Hash.new { |hash, key| hash[key] = [] } + videos_by_author = Hash.new { |hash, key| hash[key] = [] } has_philosophy = Hash.new { false } site.pages.each do |t| @@ -70,6 +72,13 @@ def generate(site) pusher(t, slides_by_author, false) end + pusher(t, events_by_author, false) if t['layout'] == 'event' + + t.data.fetch('recordings', []).each do |r| + r.fetch('captioners', []).each { |ent| videos_by_author[ent].push([t, 'captioner', r]) } + r.fetch('speakers', []).each { |ent| videos_by_author[ent].push([t, 'speaker', r]) } + end + pusher(t, learning_pathways_by_author, false) if t['layout'] == 'learning-pathway' # Philosophies @@ -95,15 +104,19 @@ def generate(site) page2.data['title'] = "GTN Contributor: #{name}" page2.data['layout'] = 'contributor_index' - page2.data['tutorials'] = tutorials_by_author[contributor] - page2.data['slides'] = slides_by_author[contributor] + page2.data['tutorials'] = tutorials_by_author[contributor].group_by{|x| x[0] }.map{|k, v| [k, v.map{|vv| vv[1]}.compact]} + page2.data['slides'] = slides_by_author[contributor].group_by{|x| x[0] }.map{|k, v| [k, v.map{|vv| vv[1]}.compact]} page2.data['news'] = news_by_author[contributor] page2.data['learning_pathways'] = learning_pathways_by_author[contributor] + page2.data['events'] = events_by_author[contributor].group_by{|x| x[0] }.map{|k, v| [k, v.map{|vv| vv[1]}.compact]} + page2.data['videos'] = videos_by_author[contributor].group_by{|x| x[0] }.map{|k, v| [k, v.map{|vv| vv[1]}.uniq.compact]} page2.data['tutorials_count'] = tutorials_by_author[contributor].length page2.data['slides_count'] = slides_by_author[contributor].length page2.data['news_count'] = news_by_author[contributor].length page2.data['learning_pathways_count'] = learning_pathways_by_author[contributor].length + page2.data['events_count'] = events_by_author[contributor].length + page2.data['videos_count'] = videos_by_author[contributor].length page2.data['editors'] = TopicFilter.enumerate_topics(site).select do |t| t.fetch('editorial_board', []).include?(contributor) diff --git a/_plugins/feeds.rb b/_plugins/feeds.rb index 30c61010acc231..bcfa54ac3a113e 100644 --- a/_plugins/feeds.rb +++ b/_plugins/feeds.rb @@ -135,11 +135,11 @@ def all_date_sorted_materials(site) materials.each do |m| tags = [m['topic_name']] + (m['tags'] || []) - bucket += m['ref_tutorials'].map do |t| + bucket += m.fetch('ref_tutorials', []).map do |t| 
[Gtn::PublicationTimes.obtain_time(t.path).to_datetime, 'tutorials', t, tags] end - bucket += m['ref_slides'].reject { |s| s.url =~ /-plain.html/ }.map do |s| + bucket += m.fetch('ref_slides', []).reject { |s| s.url =~ /-plain.html/ }.map do |s| [Gtn::PublicationTimes.obtain_time(s.path).to_datetime, 'slides', s, tags] end end diff --git a/_plugins/generator-recordings.rb b/_plugins/generator-recordings.rb new file mode 100644 index 00000000000000..a34ed62e178c6a --- /dev/null +++ b/_plugins/generator-recordings.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require './_plugins/gtn' + +module Jekyll + ## + # This class generates the GTN's recording pages + class RecordingPageGenerator < Generator + safe true + + ## + # This generates the recording pages, where needed. + # Params + # +site+:: The site object + def generate(site) + Jekyll.logger.info "[GTN/Videos] Generating recording pages" + materials = TopicFilter + .list_all_materials(site) + + with_video = materials + .select { |m| m.key?('recordings') || m.key?('slides_recordings') } + + Jekyll.logger.info "[GTN/Videos] #{with_video.length} materials with recordings found." + with_video.each do |material| + page2 = PageWithoutAFile.new(site, '', material['dir'], 'recordings/index.html') + page2.content = nil + page2.data['layout'] = 'recordings' + page2.data['topic_name'] = material['topic_name'] + page2.data['tutorial_name'] = material['tutorial_name'] + page2.data['material'] = material + site.pages << page2 + end + end + end +end diff --git a/_plugins/gtn.rb b/_plugins/gtn.rb index a9ec734dc0b042..ea45b8b0eb0a24 100644 --- a/_plugins/gtn.rb +++ b/_plugins/gtn.rb @@ -14,6 +14,8 @@ require './_plugins/util' require './_plugins/jekyll-topic-filter' require 'time' +require 'net/http' + Jekyll.logger.info "[GTN] Jekyll env: #{Jekyll.env}" Jekyll.logger.info "[GTN] You are running #{RUBY_VERSION} released on #{RUBY_RELEASE_DATE} for #{RUBY_PLATFORM}" @@ -69,6 +71,17 @@ def elixirnode2name(name) ELIXIR_NODES[name] end + # Check whether a URL resolves, via a cached HTTP HEAD request + def url_exists(url) + cache.getset("url-exists-#{url}") do + uri = URI.parse(url) + http = Net::HTTP.new(uri.host, uri.port) + http.use_ssl = true if uri.scheme == 'https' + response = http.request_head(uri.path) + response.code == '200' + end + end + ## # Obtain the most cited paper in the GTN # Params: @@ -527,8 +540,10 @@ def get_recent_feedbacks(site, material_id) def tutorials_over_time_bar_chart(site) graph = Hash.new(0) TopicFilter.list_all_materials(site).each do |material| - yymm = material['pub_date'].strftime('%Y-%m') - graph[yymm] += 1 + if material['pub_date'] + yymm = material['pub_date'].strftime('%Y-%m') + graph[yymm] += 1 + end end # Cumulative over time @@ -817,12 +832,13 @@ def find_learningpaths_including_topic(site, topic_id) ## # We're going to do some find and replace, to replace `@gtn:contributorName` with a link to their profile.
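+# A minimal sketch of the rewrite this hook performs (illustrative only; "shiltemann" is a contributor id from CONTRIBUTORS.yaml): +# "thanks @gtn:shiltemann" -> "thanks {% include _includes/contributor-badge-inline.html id="shiltemann" %}" + # Ids not present in the contributor, funder, or organisation data are left untouched.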
Jekyll::Hooks.register :site, :pre_render do |site| + pfo_keys = site.data['contributors'].keys + site.data['funders'].keys + site.data['organisations'].keys site.posts.docs.each do |post| if post.content post.content = post.content.gsub(/@gtn:([a-zA-Z0-9_-]+)/) do |match| # Get first capture name = match.gsub('@gtn:', '') - if site.data['contributors'].key?(name) + if pfo_keys.include?(name) "{% include _includes/contributor-badge-inline.html id=\"#{name}\" %}" else match @@ -834,7 +850,7 @@ def find_learningpaths_including_topic(site, topic_id) if page.content page.content = page.content.gsub(/@gtn:([a-zA-Z0-9_-]+)/) do |match| name = match.gsub('@gtn:', '') - if site.data['contributors'].key?(name) + if pfo_keys.include?(name) "{% include _includes/contributor-badge-inline.html id=\"#{name}\" %}" else match diff --git a/_plugins/jekyll-topic-filter.rb b/_plugins/jekyll-topic-filter.rb index 98cd7be9ce5940..c999b810d8020e 100644 --- a/_plugins/jekyll-topic-filter.rb +++ b/_plugins/jekyll-topic-filter.rb @@ -274,6 +274,8 @@ def self.annotate_path(path, layout) material['type'] = 'rmd' elsif parts[4] == 'workflows' material['type'] = 'workflow' + elsif parts[4] == 'recordings' + material['type'] = 'recordings' elsif parts[4] == 'tours' material['type'] = 'tour' elsif parts[-1] == 'index.md' @@ -466,12 +468,14 @@ def self.resolve_material(site, material) page = nil slide_has_video = false + slide_has_recordings = false slide_translations = [] page_ref = nil if slides.length.positive? page = slides.min { |a, b| a[1].path <=> b[1].path }[1] slide_has_video = page.data.fetch('video', false) + slide_has_recordings = page.data.fetch('recordings', false) slide_translations = page.data.fetch('translations', []) page_ref = page end @@ -497,16 +501,6 @@ def self.resolve_material(site, material) page_obj['ref_slides'] = slides.map { |a| a[1] } id = page_obj['id'] - page_obj['video_library'] = {} - - if site.data.key?('video-library') - page_obj['video_library']['tutorial'] = site.data['video-library']["#{id}/tutorial"] - page_obj['video_library']['slides'] = site.data['video-library']["#{id}/slides"] - page_obj['video_library']['demo'] = site.data['video-library']["#{id}/demo"] - page_obj['video_library']['both'] = site.data['video-library'][id] - end - - page_obj['video_library']['session'] = site.data['session-library'][id] if site.data.key?('session-library') # Sometimes `hands_on` is set to something like `external`, in which # case it is important to not override it. So we only do that if the @@ -632,6 +626,7 @@ def self.resolve_material(site, material) page_obj['tours'] = tours.length.positive? page_obj['video'] = slide_has_video + page_obj['slides_recordings'] = slide_has_recordings page_obj['translations'] = {} page_obj['translations']['tutorial'] = tutorial_translations page_obj['translations']['slides'] = slide_translations @@ -1037,6 +1032,26 @@ def identify_funders(materials, site) TopicFilter.identify_funders(materials, site) end + def list_videos(site) + TopicFilter.list_all_materials(site) + .select { |k, _v| k['recordings'] || k['slides_recordings'] } + .map { |k, _v| (k['recordings'] || []) + (k['slides_recordings'] || []) } + .flatten + end + + def findDuration(duration) + if ! duration.nil? 
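+ # A sketch of the arithmetic this builds from a schema-constrained length string + # (e.g. "1H17M"): "1 * 3600 + 17 * 60 + 0", which eval reduces to 4620 seconds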
+ eval(duration.gsub(/H/, ' * 3600 + ').gsub(/M/, ' * 60 + ').gsub(/S/, ' + ') + " 0") + else + 0 + end + end + + def list_videos_total_time(site) + vids = list_videos(site) + vids.map { |v| findDuration(v['length']) }.sum / 3600.0 + end + def list_draft_materials(site) TopicFilter.list_all_materials(site).select { |k, _v| k['draft'] } end diff --git a/assets/css/main.scss b/assets/css/main.scss index 887d303ed12c20..c7cccde8317fde 100644 --- a/assets/css/main.scss +++ b/assets/css/main.scss @@ -1881,3 +1881,40 @@ body[data-brightness="dark"] { filter: invert(1); } } + + +.recording { +} + +.recording-video { + width: 75%; + + iframe { + width: 100%; + height: 100%; + aspect-ratio: 16 / 9; + } +} + +.recording-metadata { + table tbody { + display: block; + } + + table tr td { + padding: 0.3em; + width: 100%; + display: table-cell; + } + table tr td:first-child { + text-align: right; + display: table-cell; + width: 25%; + } + table tr td:nth-child(2) { + text-align: left; + display: table-cell; + width: 100%; + } + +} diff --git a/assets/images/gtn-videolibrary-logo.png b/assets/images/gtn-videolibrary-logo.png new file mode 100644 index 00000000000000..32de64b0f753d7 Binary files /dev/null and b/assets/images/gtn-videolibrary-logo.png differ diff --git a/assets/images/video-library.png b/assets/images/video-library.png new file mode 100644 index 00000000000000..0ffc1a9a1d233c Binary files /dev/null and b/assets/images/video-library.png differ diff --git a/bin/google-form-recordings.rb b/bin/google-form-recordings.rb new file mode 100755 index 00000000000000..33d4653d5a01a6 --- /dev/null +++ b/bin/google-form-recordings.rb @@ -0,0 +1,96 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require 'yaml' +require 'net/http' +require 'csv' +require 'date' + +# Fetch data from a google sheet +url = 'https://docs.google.com/spreadsheets/d/1iXjLlMEH5QMAMyUMHi1c_Lb7OiJhL_9hgJrtAsBoZ-Y/export?format=tsv' +data = `curl -sL "#{url}"` +new_recordings = false + +data = CSV.parse(data, col_sep: "\t", headers: true, quote_char: '|') + +# column indices in the form's TSV export +col_material = 3 +col_length = 5 +col_speakers = 6 +col_galaxyversion = 10 + +## recordings metadata definition on tutorials/slides +# +# recordings: +# - speakers: +# - shiltemann +# captioners: +# - hexylena +# - bebatut +# date: '2020-06-12' +# galaxy_version: '20.05' +# length: 51M +# youtube_id: "oAVjF_7ensg" + +data.each do |row| + # Parse + # 29/01/2024 14:04:47 + submission_date = DateTime.strptime(row['Timestamp'], '%d/%m/%Y %H:%M:%S') + + # extract metadata from Google form + length = row[col_length] + galaxy_version = row[col_galaxyversion] + speakers = row[col_speakers] + date = submission_date.strftime('%Y-%m-%d') + + if row[col_material] == 'TESTING' + STDERR.puts "Skipping #{row[col_material]} as it is a test" + next + end + + material_file = row[col_material].gsub("tutorial.html","tutorial.md").gsub("https://training.galaxyproject.org/","").gsub("training-material/","") + + bot_timestamp = submission_date.to_time.to_i + # the form provides speakers as a comma-separated list of GTN ids (assumed); split them + # into a real list so the schema (seq of str) validates. Psych quotes the date-like string on dump. + recording_metadata = {"youtube_id" => "TODO", + "length" => length, + "galaxy_version" => galaxy_version, + "date" => date, + "speakers" => speakers.to_s.split(',').map(&:strip), + "captioners" => speakers.to_s.split(',').map(&:strip), + "bot-timestamp" => bot_timestamp } + + # append metadata into GTN material + material_metadata = YAML.load_file(material_file) + + if material_metadata["recordings"] + # check the "bot_timestamp" to avoid re-importing a submission we already have + exists = false + material_metadata["recordings"].each do |rec| + if
rec["bot-timestamp"].to_s == bot_timestamp.to_s + exists = true + end + end + + if !exists + material_metadata["recordings"].push(recording_metadata) + new_recordings = true + end + else + material_metadata["recordings"] = [recording_metadata] + new_recordings = true + end + + #pp material_metadata + + # write to file + material_original = File.open(material_file,"r").read.split("---\n",3) + + outfile = File.open(material_file,"w") + outfile.write("#{material_metadata.to_yaml}\n\n---\n\n#{material_original[2]}") + +end + +puts new_recordings diff --git a/bin/lint.rb b/bin/lint.rb index 0393e4f282c0d0..309345b1efafc1 100755 --- a/bin/lint.rb +++ b/bin/lint.rb @@ -141,7 +141,7 @@ def self.youtube_bad(contents) match_end: selected.end(0) + 1, replacement: '', message: 'Instead of embedding IFrames to YouTube contents, consider adding this video to the ' \ - '[GTN Video Library](https://github.com/gallantries/video-library/issues/) where it will ' \ + 'GTN tutorial "recordings" metadata where it will ' \ 'be more visible for others.', code: 'GTN:002' ) diff --git a/bin/schema-slides.yaml b/bin/schema-slides.yaml index fe541278fff4ea..d4c703909fada9 100644 --- a/bin/schema-slides.yaml +++ b/bin/schema-slides.yaml @@ -395,3 +395,42 @@ mapping: pattern: /^\/.*/ examples: - /assets/images/gat.png + + recordings: + type: seq + sequence: + - type: map + mapping: + captioners: + type: seq + sequence: + - type: str + enum: + - CONTRIBUTORS + speakers: + type: seq + sequence: + - type: str + enum: + - CONTRIBUTORS + bot-timestamp: + type: int + date: + type: str + required: true + pattern: /[0-9]{4}-[0-9]{2}-[0-9]{2}/ + galaxy_version: + type: str + pattern: /[0-9]{2}\.[0-9]{1,2}/ + length: + type: str + pattern: /^(?:([0-9]*)[Hh])*(?:([0-9]*)[Mm])*(?:([0-9.]*)[Ss])*$/ + youtube_id: + type: str + pattern: /[A-Za-z0-9_-]{9,13}/ + type: + type: str + archive-id: + type: str + description: + type: str diff --git a/bin/schema-tutorial.yaml b/bin/schema-tutorial.yaml index 00f2e927ea7d7b..1cc6ca4c99ac4e 100644 --- a/bin/schema-tutorial.yaml +++ b/bin/schema-tutorial.yaml @@ -493,3 +493,41 @@ mapping: sequence: - type: map mapping: *answer_histories + recordings: + type: seq + sequence: + - type: map + mapping: + captioners: + type: seq + sequence: + - type: str + enum: + - CONTRIBUTORS + speakers: + type: seq + sequence: + - type: str + enum: + - CONTRIBUTORS + bot-timestamp: + type: int + date: + type: str + required: true + pattern: /[0-9]{4}-[0-9]{2}-[0-9]{2}/ + galaxy_version: + type: str + pattern: /[0-9]{2}\.[0-9]{1,2}/ + length: + type: str + pattern: /^(?:([0-9]*)[Hh])*(?:([0-9]*)[Mm])*(?:([0-9.]*)[Ss])*$/ + youtube_id: + type: str + pattern: /[A-Za-z0-9_-]{9,13}/ + type: + type: str + archive-id: + type: str + description: + type: str diff --git a/faqs/gtn/gtn_stats.md b/faqs/gtn/gtn_stats.md index 8c15f3d99c0d0c..534cc66794888f 100644 --- a/faqs/gtn/gtn_stats.md +++ b/faqs/gtn/gtn_stats.md @@ -13,6 +13,8 @@ description: Statistics over the GTN {% assign topics = site | list_topics_by_category: "science-technical" | to_keys %} {% assign contributors = site.data['contributors'] | where_exp: "item", "item.halloffame != 'no'" | sort: "joined" %} {% assign learning_pathways = site.pages | where:"layout", "learning-pathway" | where_exp:"item","item.draft != true" %} +{% assign news = site.posts | where:"layout", "news" %} +{% assign videos = site | list_videos %} {% if include.compact %} @@ -52,46 +54,61 @@ description: Statistics over the GTN
      -
      +
      {{ topics | size }}
      Topics
      -
      +
      {{ tutorials | size }}
      Tutorials
      -
      +
      {{ learning_pathways | size }}
      Learning Pathways
      -
      +
      {{ faqs | size }}
      FAQs
      -
      +
      {{ contributors | size }}
      Contributors
      -
      +
      {{ site.age | round: 1}}
      Years
      + +
      +
      +
      {{ news | size }}
      +
      News Posts
      +
      +
      + +
      +
      +
      {{ videos | size }}
      +
      Videos ({{ site | list_videos_total_time | round:1}}h)
      +
      +
      +
{% endif %} diff --git a/faqs/gtn/images/bad-console.png b/faqs/gtn/images/bad-console.png new file mode 100644 index 00000000000000..37cba8a32bd831 Binary files /dev/null and b/faqs/gtn/images/bad-console.png differ diff --git a/faqs/gtn/images/bad.png b/faqs/gtn/images/bad.png new file mode 100644 index 00000000000000..2ffd575e48563d Binary files /dev/null and b/faqs/gtn/images/bad.png differ diff --git a/faqs/gtn/images/good-console.png b/faqs/gtn/images/good-console.png new file mode 100644 index 00000000000000..7044321bace61c Binary files /dev/null and b/faqs/gtn/images/good-console.png differ diff --git a/faqs/gtn/images/good.png b/faqs/gtn/images/good.png new file mode 100644 index 00000000000000..4efd151c4130a5 Binary files /dev/null and b/faqs/gtn/images/good.png differ diff --git a/faqs/gtn/images/mouse.png b/faqs/gtn/images/mouse.png new file mode 100644 index 00000000000000..91d9da4cead28f Binary files /dev/null and b/faqs/gtn/images/mouse.png differ diff --git a/faqs/gtn/recordings_add.md b/faqs/gtn/recordings_add.md new file mode 100644 index 00000000000000..0652eff26ae113 --- /dev/null +++ b/faqs/gtn/recordings_add.md @@ -0,0 +1,65 @@ +--- +title: Adding your recording to a tutorial or slide deck +area: contributors +layout: faq +box_type: tip +contributors: [shiltemann] +--- + +**We welcome anybody to submit their recordings!** Your videos can be used in (online) training events, or for self-study by learners on the GTN. + +For some tips and tricks about recording the video itself, please see + + + + +#### Submission process + +The process of adding recordings to the GTN is as follows: + +1. **Instructor:** Record video ([tips & tricks]({% link faqs/gtn/recordings_create.md %})) +2. **Instructor:** Submit your video using this [Google Form](https://forms.gle/qNG8FkTN1yRZPNZY6) +3. **GTN:** A [GTN GitHub pull request (PR)](https://github.com/galaxyproject/training-material/pulls) will be made by our bot based on the form. +4. **GTN:** We will upload your video to the [GalaxyProject YouTube channel](https://www.youtube.com/c/galaxyproject) +5. **GTN:** We will put the auto-generated captions from YouTube into a Google Doc +6. **Instructor:** Check and fix the auto-generated captions +7. **GTN:** Upload the fixed captions to YouTube +8. **GTN:** Merge the Pull Request on GitHub +9. Done! Your recording will now show up on the tutorial for anybody to use and re-use. + + +**Note:** If you are submitting a video to use in an event, please submit your recording **2 weeks before the start of your course** to allow ample time to complete the submission process. + +#### Recordings Metadata + +Our bot will add some metadata about your recording to the tutorial or slide deck in question; it looks as follows: + +``` +recordings: + - speakers: # speakers must be defined in the CONTRIBUTORS.yaml file + - shiltemann + - hexylena + captioners: # captioners must also be present in the CONTRIBUTORS.yaml file + - bebatut + type: # optional, will default to Tutorial or Lecture, but if you do something different, set it here (e.g.
Demo, Lecture & Tutorial, Background, Webinar) + date: '2024-06-12' # date on which you recorded the video + galaxy_version: '24.0' # version of Galaxy you used during the recording, can be found under 'Help->About' in Galaxy + length: 1H17M # length of your video, in the format 17M or 2H34M, etc. + youtube_id: "dQw4w9WgXcQ" # the bit of the YouTube URL after youtube.com/watch?v= + + - speakers: + - shiltemann + captioners: + - hexylena + - bebatut + date: '2020-06-12' + galaxy_version: '20.05' + length: 51M + youtube_id: "oAVjF_7ensg" + +``` + +#### Misc + +**Note:** If your videos are already uploaded to YouTube, for example as part of a different project's account, you can add this metadata to the tutorial or slides manually, without using our submission form. +Note that we do require all videos to have good-quality English captions, and we will not be able to help you configure these on other YouTube accounts. diff --git a/faqs/gtn/recordings_create.md b/faqs/gtn/recordings_create.md new file mode 100644 index 00000000000000..51f8c198bafac7 --- /dev/null +++ b/faqs/gtn/recordings_create.md @@ -0,0 +1,118 @@ +--- +title: Recording a video tutorial +area: contributors +layout: faq +box_type: tip +contributors: [shiltemann, hexylena] +--- + +This FAQ describes some **general guidelines** for recording your video. + +**Anybody is welcome to record one of the GTN tutorials**, even if another recording already exists! +Both the GTN tutorial and Galaxy itself change significantly over time, and having regular and/or multiple recordings of tutorials is great! + +**Done with your recording?** Check out the instructions for adding it to the GTN: + + + + +#### Video content + +1. **Start of video** + - **Introduce yourself** + - Discuss the **questions and learning objectives** of the tutorial + - Give a **basic introduction to the topic**; many participants will be novices + +2. **Guide the learners through the tutorial step by step** + - Explain the scientific background of the analysis + - Explain where you are clicking in Galaxy + - Explain what tool parameters mean + - Explain what the tool does + - Discuss the output files + - Discuss how to interpret the results + - Discuss question boxes from the tutorial + +3. **Speak slowly and clearly** + - Take your time; we are not in a hurry + - It is often a lot of new information for participants; give them a chance to process all of it + - Speaking slowly and clearly will improve the quality of the auto-generated captions, and will mean less work for you when fixing them. + +4. **If things go wrong, that is OK!** + - It's a great teaching moment! + - Explain the steps you are taking to determine what went wrong, and how you are fixing it. + - It makes participants feel less bad if things go wrong for them + +5. **If your tutorial is long** + - Indicate good places for people to take a break + - e.g. when a tool takes a while to run + +6. **End of video** + - Go over some of the **take-home messages (key-points)** of the tutorial + - Remind viewers about the **feedback form** embedded at the end of the tutorial + - Share your recommendations for **follow-up tutorials** + - Share any other tips for where to learn more about the topic + - Share **how to connect with the community** (e.g. Matrix, Help Forum, social media, etc) + +7. If you are doing both a lecture and a hands-on training, please create two separate videos + + +#### Technical Guidelines + +1. Start a [Zoom](https://zoom.us/) call with yourself and record it.
+ - For Mac users, QuickTime Player is also a nice option. + - Have another preference like OBS? Totally OK too! + - We recommend Zoom for folks new to video production, as it is the easiest to get started with and produces quite small file sizes. + +2. Do a short **test recording** first + - Is the **audio quality** good enough? + - Wearing a headset often improves the audio quality. + - **Screen sharing:** is your screen readable? + - Make sure you **zoom in** enough that what you are doing in Galaxy is clearly visible. + - Test watching the video in a non-maximised window. Is it still legible? + - If the participant is using 50% of their screen for the video, 50% for Galaxy, will it be legible? + +3. Need to edit your video after recording? + - For example, to merge multiple videos together? + - Software like [KDEnlive](https://kdenlive.org/en/) can help here. + - Feel free to ask us for help if you need it! + + +#### Standards + +1. **Zoom in**, in every interface you're covering! Many people will be watching the video while they're doing the activity, and won't have significant monitor space. Which video below would you rather be trying to follow? + + Bad | Good 😍 + --- | --- + ![default size screenshot of usegalaxy.eu]({% link faqs/gtn/images/bad.png %}) | ![zoomed in screenshot of usegalaxy.eu, now much more legible]({% link faqs/gtn/images/good.png %}) + + Bad | Good 🤩 + --- | --- + ![green text on black background console with tiny font]({% link faqs/gtn/images/bad-console.png %}) | ![zoomed in screenshot of a console with high contrast black and white content]({% link faqs/gtn/images/good-console.png %}) + +2. (Especially for introductory videos!) Clearly call out what you're doing, especially on the first occurrence. + + Bad | Good + --- | --- + "Re-run the job" | "We need to re-run the job, which we can do by first clicking to expand the dataset, and then using the re-run job button, which looks like a refresh icon." + + Bad | Good + --- | --- + "As you can see here the report says X" | "I'm going to view the output of this tool, click on the eyeball icon, and as you can see the report says X." + + The same goes for terminal aliases: please disable all of your favourite aliases and quick shortcuts, disable your bashrc customisations, etc. Students will type exactly what is on the screen and will fail if it relies on your personal setup, so be very clear and explicit; their environment should at minimum match yours. + + Bad | Good + --- | --- + `lg file` | `ls -al | grep file` + `z galaxy` | `cd path/to/the/galaxy` + +3. Consider using a pointer that is more visually highlighted. + + ![mouse pointer with circle around it that follows it around]({% link faqs/gtn/images/mouse.png %}) + + There are themes available for your mouse pointer that you can temporarily use while recording, making it easier for watchers to see what you're doing. + + - [Windows](https://www.microsoft.com/en-us/p/mouse-pointer-highlight/9p7sb9s4rq7z?activetab=pivot:overviewtab) + - [Linux](https://askubuntu.com/questions/777896/how-do-i-highlight-my-mouse-pointer-while-screen-recording/917587#917587) + + diff --git a/metadata/video-library-unimported-vids.txt b/metadata/video-library-unimported-vids.txt new file mode 100644 index 00000000000000..db3662c732ac10 --- /dev/null +++ b/metadata/video-library-unimported-vids.txt @@ -0,0 +1,1706 @@ +# associate with event?
+admin/week-overview-slides: + description: null + materials: + - link: topics/admin/tutorials/ansible-galaxy/slides.html + type: Week Overview Slides + support_channel: admin_ansible-galaxy + tags: + - admin + title: Admin Training Week Overview + type: Tutorial + versions: + - captions: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 4M + link: JN-C5UbHthY + speakers: + - hexylena + +admin/welcome: + description: 'This supplementary welcome will give you additional details that are + needed for the week. + + + Please note that when you use the mentioned /completed command, you do not need + to specify a history URL. Just `/completed` is sufficient. + + ' + materials: [] + support_channel: event-gat + tags: + - admin + title: Admin Welcome + type: Lecture + versions: + - captions: + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 4M + link: JN-C5UbHthY + speakers: + - hexylena + - martenson + - slugger70 + - nsoranzo + - shiltemann + - gmauro + - cat-bro + +# make a community 'tutorial' / slide deck and associate this vid? +community/welcome: +description: This short video gives an overview of the worldwide Galaxy community, + and different ways you can get involved! Video created by Beatriz Serrano-Solano. +materials: +- external: true + link: https://galaxyproject.org + title: galaxyproject.org + type: GalaxyProject Home +- external: true + link: https://galaxyproject.org/use/ + title: Find a Galaxy server near you! + type: Galaxy servers +- external: true + link: https://training.galaxyproject.org + title: Start learning Galaxy + type: Galaxy Training Network +- external: true + link: https://help.galaxyproject.org/ + title: Galaxy Help Forum + type: Support +- external: true + link: https://www.zotero.org/groups/1732893/galaxy + title: Zotero + type: Galaxy Publications +- external: true + link: https://galaxyproject.org/community/ + title: Find and Join a community + type: Galaxy Communities +- external: true + link: https://galaxyproject.org/events/ + title: Galaxy Event Horizon + type: Events +- external: true + link: https://galaxyproject.org/mailing-lists/ + title: Stay informed of Galaxy activity! + type: Mailing Lists +- external: true + link: https://galaxyproject.org/community/wg/ + title: Find and join a WG + type: Galaxy Working Groups +- external: true + link: https://github.com/galaxyproject + title: galaxyproject + type: GitHub +- external: true + link: https://gitter.im/galaxyproject/Lobby + title: Start talking with the Galaxy Community! + type: Gitter Chat +- external: true + link: https://gitter.im/Galaxy-Training-Network/Lobby + title: Join the training discussion here! + type: GTN Gitter +- external: true + link: https://galaxyproject.org/events/2021-02-papercuts/ + title: Monthly Collaboration Fest + type: PaperCuts +- external: true + link: https://github.com/galaxyproject/training-material/issues/1712 + title: Everybody Welcome! + type: GTN CoFest +- external: true + link: https://twitter.com/galaxyproject + title: follow @galaxyproject + type: GalaxyProject Twitter +- external: true + link: https://twitter.com/gxytraining + title: follow the GTN @gxytraining + type: GTN Twitter +tags: +- community +title: Meet & Join the Galaxy Community! +type: Welcome +versions: +- captions: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 5M + link: -1MPdxmRs8U + speakers: + - galaxycommunity + +# associate with FAQ? 
+data-science/rstudio-galaxy: + description: This tutorial will show you how you can start RStudio from within Galaxy. + This option is only available on Galaxy EU for the time being. If you are working + on a different Galaxy server, you can use RStudio Cloud (https://rstudio.cloud/) + materials: + - external: true + link: https://rstudio.cloud/ + title: Try RStudio Cloud instead + type: RStudio in Galaxy not available? + tags: + - galaxy-interface + title: Running RStudio on Galaxy + type: Demo + versions: + - captions: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 20M + link: vN14K74nFZc + speakers: + - fpsom + +# John's architecture vids +# put in GalaxyProject YouTube, add captions, combine into 1 vid or support playlists in gtn recordings metadata? +dev/architecture/slides-app-di: +description: TODO +materials: +- link: topics/dev/tutorials/architecture/slides.html + type: Slides +- link: topics/dev/tutorials/core-contributing/tutorial.html + title: Contributing to Galaxy Core + type: Self-study tutorial +- external: true + link: https://bit.ly/gx-arch-vids + title: Always contains the latest version of these lectures. + type: Youtube Playlist +tags: +- dev +title: 'Galaxy Code Architecture: app and Dependency Injection' +type: Lecture +versions: +- captions: null + date: '2021-02-15' + galaxy_version: '21.01' + length: 15M + link: Yjl4MIo95nY + speakers: + - jmchilton + +dev/architecture/slides-ecosystem: +description: TODO +materials: +- link: topics/dev/tutorials/architecture/slides.html + type: Slides +- link: topics/dev/tutorials/core-contributing/tutorial.html + title: Contributing to Galaxy Core + type: Self-study tutorial +- external: true + link: https://bit.ly/gx-arch-vids + title: Always contains the latest version of these lectures. + type: Youtube Playlist +tags: +- dev +title: 'Galaxy Code Architecture: Galaxy Ecosystem' +type: Lecture +versions: +- captions: null + date: '2021-02-15' + galaxy_version: '21.01' + length: 25M + link: WbC2NorVKr0 + speakers: + - jmchilton + +dev/architecture/slides-files-directories: +description: TODO +materials: +- link: topics/dev/tutorials/architecture/slides.html + type: Slides +- link: topics/dev/tutorials/core-contributing/tutorial.html + title: Contributing to Galaxy Core + type: Self-study tutorial +- external: true + link: https://bit.ly/gx-arch-vids + title: Always contains the latest version of these lectures. + type: Youtube Playlist +tags: +- dev +title: 'Galaxy Code Architecture: Files and Directories' +type: Lecture Series +versions: +- captions: null + date: '2021-02-15' + galaxy_version: '21.01' + length: 17M + link: Nrk0o4EIeUU + speakers: + - jmchilton + +dev/architecture/slides-models-managers: +description: TODO +materials: +- link: topics/dev/tutorials/architecture/slides.html + type: Slides +- link: topics/dev/tutorials/core-contributing/tutorial.html + title: Contributing to Galaxy Core + type: Self-study tutorial +- external: true + link: https://bit.ly/gx-arch-vids + title: Always contains the latest version of these lectures.
+ type: Youtube Playlist +tags: +- dev +title: 'Galaxy Code Architecture: Models and Managers' +type: Lecture +versions: +- captions: null + date: '2021-02-15' + galaxy_version: '21.01' + length: 15M + link: joiiHN9LlVo + speakers: + - jmchilton + +dev/architecture/slides-pluggable-components: +description: TODO +materials: +- link: topics/dev/tutorials/architecture/slides.html + type: Slides +- link: topics/dev/tutorials/core-contributing/tutorial.html + title: Contributing to Galaxy Core + type: Self-study tutorial +- external: true + link: https://bit.ly/gx-arch-vids + title: Always contains the latest version of these lectures. + type: Youtube Playlist +tags: +- dev +title: 'Galaxy Code Architecture: Pluggable Components' +type: Lecture +versions: +- captions: null + date: '2021-02-15' + galaxy_version: '21.01' + length: 45M + link: wiKv9MDNjA4 + speakers: + - jmchilton + +dev/architecture/slides-principles: +description: TODO +materials: +- link: topics/dev/tutorials/architecture/slides.html + type: Slides +- link: topics/dev/tutorials/core-contributing/tutorial.html + title: Contributing to Galaxy Core + type: Self-study tutorial +- external: true + link: https://bit.ly/gx-arch-vids + title: Always contains the latest version of these lectures. + type: Youtube Playlist +tags: +- dev +title: 'Galaxy Code Architecture: Principles' +type: Lecture +versions: +- captions: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 5M + link: TKs4IF4ZbeQ + speakers: + - jmchilton + +dev/architecture/slides-project-management: +description: TODO +materials: +- link: topics/dev/tutorials/architecture/slides.html + type: Slides +- link: topics/dev/tutorials/core-contributing/tutorial.html + title: Contributing to Galaxy Core + type: Self-study tutorial +- external: true + link: https://bit.ly/gx-arch-vids + title: Always contains the latest version of these lectures. + type: Youtube Playlist +tags: +- dev +title: 'Galaxy Code Architecture: Project Management' +type: Lecture +versions: +- captions: + - malloryfreeberg + date: '2021-02-15' + galaxy_version: '21.01' + length: 25M + link: ElOys4F7bzY + speakers: + - jmchilton + +dev/architecture/slides-web-client: +description: TODO +materials: +- link: topics/dev/tutorials/architecture/slides.html + type: Slides +- link: topics/dev/tutorials/core-contributing/tutorial.html + title: Contributing to Galaxy Core + type: Self-study tutorial +- external: true + link: https://bit.ly/gx-arch-vids + title: Always contains the latest version of these lectures. + type: Youtube Playlist +tags: +- dev +title: 'Galaxy Code Architecture: Web Client' +type: Lecture +versions: +- captions: null + date: '2021-02-15' + galaxy_version: '21.01' + length: 15M + link: 9auQWM3tS8I + speakers: + - jmchilton + +dev/architecture/slides-web-requests: +description: TODO +materials: +- link: topics/dev/tutorials/architecture/slides.html + type: Slides +- link: topics/dev/tutorials/core-contributing/tutorial.html + title: Contributing to Galaxy Core + type: Self-study tutorial +- external: true + link: https://bit.ly/gx-arch-vids + title: Always contains the latest version of these lectures. 
+ type: Youtube Playlist +tags: +- dev +title: 'Galaxy Code Architecture: Web Requests' +type: Lecture +versions: +- captions: null + date: '2021-02-15' + galaxy_version: '21.01' + length: 35M + link: vwsxkDMiA10 + speakers: + - jmchilton + +# debugging videos +# create a playlist with them on YouTube, support playlists in recordings metadata +# or: merge into one video and add to YouTube +dev/debugging/tutorial/api-test-failure: +description: What to do when an API test fails. +materials: +- link: topics/dev/tutorials/debugging/tutorial.html + type: Tutorial +tags: +- dev +title: API tests +type: Tutorial +versions: +- captions: + - jdavcs + date: '2021-02-15' + galaxy_version: '21.01' + length: 23M + link: vYFiFcEuLI0 + speakers: + - jdavcs + +dev/debugging/tutorial/client-linter: +description: This video covers working through the client linting error hit in the + previous tutorial. +materials: +- link: topics/dev/tutorials/debugging/tutorial.html + type: Tutorial +tags: +- dev +title: Client Linting +type: Tutorial +versions: +- captions: + - beatrizserrano + date: '2021-02-15' + galaxy_version: '21.01' + length: 6M + link: YKyP5Y4Y4js + speakers: + - assuntad23 + +dev/debugging/tutorial/client-unit-test: +description: What to do when your client throws a unit test failure. +materials: +- link: topics/dev/tutorials/debugging/tutorial.html + type: Tutorial +tags: +- dev +title: Client Unit Tests +type: Tutorial +versions: +- captions: + - beatrizserrano + date: '2021-02-15' + galaxy_version: '21.01' + length: 6M + link: A6LSTCb-rGg + speakers: + - assuntad23 + +dev/debugging/tutorial/intro: +description: This is the first video in the Debugging Galaxy series. +materials: +- link: topics/dev/tutorials/debugging/tutorial.html + type: Tutorial +tags: +- dev +title: Introduction +type: Tutorial +versions: +- captions: + - tnabtaf + date: '2021-02-15' + galaxy_version: '21.01' + length: 11M + link: FlFF3TPEfC8 + speakers: + - assuntad23 + - jdavcs + +dev/debugging/tutorial/runtime-error: +description: How to fix a runtime error +materials: +- link: topics/dev/tutorials/debugging/tutorial.html + type: Tutorial +tags: +- dev +title: Runtime Errors +type: Tutorial +versions: +- captions: + - jdavcs + date: '2021-02-15' + galaxy_version: '21.01' + length: 40M + link: S7N1WvYGJik + speakers: + - jdavcs + +dev/debugging/tutorial/selenium: +description: Selenium is used to test Galaxy development. Learn more about test failures + and how to address them. +materials: +- link: topics/dev/tutorials/debugging/tutorial.html + type: Tutorial +tags: +- dev +title: Selenium +type: Tutorial +versions: +- captions: + - assuntad23 + - jdavcs + - tnabtaf + - beatrizserrano + date: '2021-02-15' + galaxy_version: '21.01' + length: 18M + link: 5AAHjxEZb-Y + speakers: + - assuntad23 + +dev/debugging/tutorial/summary: +description: A summary of wisdom to debug Galaxy with. +materials: +- link: topics/dev/tutorials/debugging/tutorial.html + type: Tutorial +tags: +- dev +title: Debugging Wisdom +type: Lecture +versions: +- captions: + - tnabtaf + date: '2021-02-15' + galaxy_version: '21.01' + length: 3M + link: _sH1uTKf4ec + speakers: + - assuntad23 + - jdavcs + +dev/debugging/tutorial/unit-test-failure: +description: What to do when a unit test fails.
+materials: +- link: topics/dev/tutorials/debugging/tutorial.html + type: Tutorial +tags: +- dev +title: Unit Testing +type: Tutorial +versions: +- captions: + - jdavcs + date: '2021-02-15' + galaxy_version: '21.01' + length: 14M + link: BiANBC78EXM + speakers: + - jdavcs + +# associate with tutorials? associate with faq? +galaxy-interface/interactive-tools: +description: This demo video shows you how you can run Interactive Tools such as + RStudio and Jupyter notebooks within Galaxy. +tags: +- galaxy-intro +- galaxy +title: Interactive Tools +type: Demo +versions: +- captions: + - shiltemann + date: '2022-07-07' + length: 15M + link: 21Eo3QlKuoA + speakers: + - hexylena + +# not associated with any specific GTN material +galaxy-interface/upload-to-ena: +description: "So you\u2019ve used Galaxy workflows to analyze your SARS-CoV-2 samples? + Learn in this tutorial how to export results to your favorite datastore." +materials: [] +tags: +- sars-cov2 +- galaxy-interface +title: Uploading data to ENA +type: Lecture & Tutorial +versions: +- captions: + - hexylena + date: '2021-08-09' + length: 10M + link: -5U0sINjoig + speakers: + - wm75 + +# associate with topic? +# get Anton's slides into GTN? +galaxy/intro: +description: This video will introduce the Galaxy data analysis platform, and give + a short demo on how to use it. +materials: +- link: topics/introduction/tutorials/galaxy-intro-short/slides.html + title: A Short Introduction to Galaxy + type: Related Slides +tags: +- galaxy-intro +- galaxy +title: A Very Short Introduction to Galaxy +type: Lecture & Demo +versions: +- captions: + - nekrut + date: '2021-02-15' + galaxy_version: '21.01' + length: 10M + link: VZoz3k5EehI + speakers: + - nekrut + + +# associate with TnSeq slides? +# import slides used for lecture? +genome-annotation/tnseq-lecture: +description: 'This lecture talks about the basics of TnSeq analysis + + ' +materials: +- external: true + link: https://www.dropbox.com/sh/ayix345v11n69es/AABJerasUhiD4JfSgS3g0kS7a?dl=0&preview=TnSeq-GTapas-2022.pdf + type: Non-GTN Lecture +tags: +- genome-annotation +title: Essential genes detection with Transposon insertion sequencing (Lecture) +type: Lecture +versions: +- captions: + - delphine-l + date: '2022-03-10' + galaxy_version: '21.09' + length: 10M + link: ehqObidPhcE + speakers: + - delphine-l + + +# not sure what to do with this one +# make a misc page with webinars and assorted vids? +metagenomics/clinical-applications: +description: Nanopore Sequencing has many applications in a clinical setting. In this + lecture, Astrid Heikema discusses how her group at the Erasmus Medical Center uses + Nanopore for sequencing of bacterial genomes. +materials: [] +tags: +- metagenomics +title: Nanopore Whole Bacterial Genome Sequencing in a Clinical Setting +type: Lecture +versions: +- captions: + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 30M + link: icXMqMut6PY + speakers: + - aheikema + +# proteomics vids +# discuss with Galaxy-P team where to put them +# they will probably record new vids for Galaxy academy +proteogenomics/introduction/slides: +description: In this opening presentation, the basic components of proteogenomics + are described, including the main steps in the bioinformatics analysis workflow + that make up this approach and will be detailed in the following tutorials. Some + examples of research questions that benefit from a proteogenomics approach are also + highlighted.
+materials: +- external: true + link: https://drive.google.com/file/d/1rqzf6Mrk9cMuQ2531tM6RpltPLrDCrgh/view?usp=sharing + type: Slides +tags: +- proteogenomics +title: Introduction to Proteogenomics +type: Lecture +versions: +- captions: + - emmaleith + date: '2021-02-15' + galaxy_version: '21.01' + length: 20M + link: WpH101OPrG0 + speakers: + - timothygriffin + +# no slides in the GTN yet, add first? +proteomics/encyclopedia/slides: +description: This lecture will introduce EncyclopeDIA. +materials: +- link: topics/proteomics/tutorials/encyclopedia/tutorial.html + type: Tutorial +tags: +- proteomics +type: Lecture +versions: +- captions: + - emmaleith + date: '2021-06-25' + galaxy_version: '21.01' + length: 20M + link: EGWTFWeI5gg + speakers: + - jj-umn + - pratikdjagtap + + +# not associated with a GTN material +proteomics/pandemic/external: +description: "The Coronavirus Disease 2019 (COVID-19) global pandemic has had a profound, + lasting impact on the world's population. Accurate and timely diagnosis of COVID-19 + infection is an important step for providing care and containing its further spread. + In this tutorial, attendees will be introduced to two workflows \u2013 a) database + search workflow and b) peptide validation workflow. Attendees of the workshop will + get in-depth knowledge of the Galaxy workflows that detect SARS-CoV-2 peptides + ([10.1186/s12014-021-09321-1](https://doi.org/10.1186/s12014-021-09321-1)) and + co-infecting pathogen peptides ([10.1021/acs.jproteome.0c00822](https://doi.org/10.1021/acs.jproteome.0c00822))." +materials: +- external: true + link: https://drive.google.com/file/d/1anBPmGRWEVp9pBLZ_JInWV7iWrBpyCDs/view?usp=sharing + type: Slides +- external: true + link: https://usegalaxy.eu/workflow/export_to_file?id=8a85663218676b59 + type: Workflow +tags: +- proteomics +title: Pandemics Research using Mass Spectrometry +type: Lecture & Demo +versions: +- captions: + - emmaleith + date: '2021-02-15' + galaxy_version: '21.01' + length: 35M + link: CI35gTmZoqM + speakers: + - timothygriffin + - subinamehta + - andrewr + - pratikdjagtap + + +# SARS-CoV-2 vids: +# not associated with GTN materials +sars-cov2/observablehq: +description: 'In this demo you will get to know the ObservableHQ platform for interactive + data visualization. You will see how covid19.galaxyproject.org uses it to build + a dashboard for their SARS-CoV-2 analysis efforts and will learn how to customize + this solution to fit your own purposes. + + ' +materials: [] +tags: +- sars-cov2 +title: Using and Customising ObservableHQ +type: Demo +versions: +- captions: + - assuntad23 + date: '2021-12-08' + galaxy_version: '21.01' + length: 15M + link: owZ8AToX4sE + speakers: + - sergeipond + + +sars-cov2/upload-ena: +description: '' +materials: [] +tags: +- sars-cov2 +title: Upload to ENA +type: Demo +versions: +- captions: + - mariipia10 + - assuntad23 + date: '2021-12-08' + galaxy_version: '21.01' + length: 10M + link: sEjhWAtmAn4 + speakers: + - miguelroncoroni + +sars-cov2/usegalaxy-star-bot: +description: "See in this demo how, on usegalaxy.*, we\u2019ve used Planemo and Bioblend + to build and operate an automated SARS-CoV-2 genome surveillance system based on + the Galaxy workflows for variant calling, consensus building and reporting."
+materials: [] +tags: +- sars-cov2 +title: The usegalaxy.* SARS-CoV-2 Bot in Action +type: Demo +versions: +- captions: + - hexylena + date: '2021-08-09' + length: 40M + link: IRxja8bZ-MU + speakers: + - wm75 + +sars-cov2/using-galaxy: +description: 'Get an overview of the workshop: production-ready Galaxy workflows for + SARS-CoV-2 sequencing data, tools you should know to automate workflow execution, + and how you combine all of it to turn Galaxy into a platform for genome surveillance' +materials: [] +tags: +- sars-cov2 +title: SARS-CoV-2 Monitoring and Analysis with Galaxy +type: Tutorial +versions: +- captions: + - mariipia10 + date: '2021-08-09' + length: 13M + link: luxFraFJTc4 + speakers: + - wm75 + +sars-cov2/viral-beacon/slides: +description: "How to visualize tens of thousands of SARS-CoV-2 analysis results? Learn + about the Viral Beacon project\u2019s solution!\n" +materials: +- external: true + link: https://drive.google.com/file/d/1yCHOi1EGKpkH-3XpKTKNKpFjwYAWKSVx/preview +tags: +- sars-cov2 +title: Introduction to Viral Beacon +type: Demo +versions: +- captions: + - assuntad23 + date: '2021-12-08' + galaxy_version: '21.01' + length: 24M + link: R_4yUMPk7eY + speakers: + - babitasingh + +# not associated with GTN materials +virology/sequencing-spectrum-viral-genomes: +title: Sequencing data analysis for a spectrum of viral genomes +description: null +type: Lecture +tags: +- variant-analysis +- virology +versions: +- link: CAwldVPimkY + speakers: + - wm75 + date: '2023-05-04' + length: 12M + captions: + - wm75 + +# not associated with GTN materials +one-health/galaxy-pathogen-surveillance: +title: An automated SARS-CoV-2 genome surveillance system built around Galaxy +description: null +type: Lecture +materials: +- external: true + link: https://www.infectious-diseases-toolkit.org/showcase/covid19-galaxy + title: 'IDTk Showcase: An automated SARS-CoV-2 genome surveillance system built + around Galaxy' + type: Additional Info +tags: +- one-health +versions: +- link: -TiqrPJtaxE + speakers: + - wm75 + date: '2023-05-19' + length: 10M + captions: + - wm75 + + + + + +# which tutorial does this fit with best? +statistics/machine-learning/intro: +description: The lecture explains introductory concepts in machine learning such as + supervised and unsupervised learning, classification and regression, hyperparameter + optimisation, cross-validation, and train, test and validation sets. +materials: +- external: true + link: https://docs.google.com/presentation/d/1RZtXFk8qz6wmPsIKnMte3Y4SvmI_nD2vIyeSEvYR6RA/edit?usp=sharing + title: Introduction to Machine Learning + type: Slides +support_channel: machine_learning_introduction +tags: +- statistics +title: Introduction to Machine Learning +type: Lecture +versions: +- captions: + - anuprulez + date: '2021-02-15' + galaxy_version: '21.01' + length: 15M + link: ix58oj8n8nA + speakers: + - anuprulez + + +# slides not in GTN +variant-analysis/tb-variant-analysis/slides: +description: "Tuberculosis (TB) is an infectious disease caused by the bacterium Mycobacterium + tuberculosis. According to the WHO, in 2018 there were 10.0 million new cases of + TB worldwide and 1.4 million deaths due to the disease, making TB the world\u2019s + most deadly infectious disease."
+materials: +- external: true + link: https://docs.google.com/presentation/d/16XTMh2fRrl73WVaezNAFWET0IIMXBv0MCI2engFtdQs/edit#slide=id.g6231f48ea2_0_34 + type: Slides +tags: +- variant-analysis +type: Lecture +versions: +- captions: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 20M + link: yCC9EmLuyJg + speakers: + - pvanheus + + +# make a webinar page for these? +webinar/admins: +materials: +- external: true + link: https://galaxyproject.org/events/webinars/ + title: View past & upcoming webinars + type: Galaxy Webinar Page +tags: +- webinar +title: Galaxy Resources for.. Admins +type: Webinar +versions: +- captions: + - galaxycommunity + date: '2021-05-12' + length: 1H5M + link: vx2icgaU_h0 + speakers: + - gmauro + - lldelisle + +webinar/educators: +materials: +- external: true + link: https://galaxyproject.org/events/webinars/ + title: View past & upcoming webinars + type: Galaxy Webinar Page +tags: +- webinar +title: Galaxy Resources for.. Educators +type: Webinar +versions: +- captions: + - shiltemann + date: '2021-04-28' + length: 1H10M + link: AXynJtPXgRQ + speakers: + - shiltemann + - miaomiaozhou88 + - christinecucinotta + - subinamehta + - bebatut + +webinar/researchers: +description: Wondering how to get started, or how to do a particular type of analysis, + or how to scale to thousands of datasets? This webinar will highlight how to find + the resources and help you need for each of these questions and more. +materials: +- external: true + link: https://docs.google.com/presentation/d/1dgKt1KJEazVPLmUXoXDUKgQl4hu1-Mute_AhSt183lQ/edit?usp=sharing + title: Slides from this webinar +- external: true + link: https://galaxyproject.org/events/webinars/ + title: View past & upcoming webinars + type: Galaxy Webinar Page +tags: +- webinar +title: Galaxy Resources for.. Researchers +type: Webinar +versions: +- captions: + - shiltemann + date: '2021-04-21' + length: 1H10M + link: cFN_WPGz9qk + speakers: + - tnabtaf + +webinar/tooldevs: +materials: +- external: true + link: https://galaxyproject.org/events/webinars/ + title: View past & upcoming webinars + type: Galaxy Webinar Page +tags: +- webinar +title: Galaxy Resources for.. Tool Developers +type: Webinar +versions: +- captions: + - hexylena + date: '2021-05-26' + length: 59M + link: MdQyltMAocg + speakers: + - blankenberg + - abretaud + - petrnovak + + +# not sure where to put these +# add Miaomiao's slides to GTN? +wetlab/miseq/demo: +description: This video will show you the full process of sequencing a bacterial genome + using the Oxford Nanopore MinION sequencer. +materials: [] +tags: +- wetlab +title: MinION Wetlab demo +type: Demo +versions: +- captions: + - shiltemann + date: '2021-02-15' + length: 20M + link: bZfEyd87xCE + speakers: + - miaomiaozhou88 + + + + + +# which gtn material does this fit best with? +ro-crates/workflowhub/tutorial: +description: null +materials: +- link: https://doi.org/10.5281/zenodo.7787488 +- link: https://about.workflowhub.eu/docs/registering-a-workflow/ +tags: +- ro-crate +- workflowhub +title: Registering Galaxy workflows in WorkflowHub +type: Tutorial +versions: +- captions: + - biocommons + date: '2023-03-21' + length: 35M + link: 2kGKxaPuQN8 + speakers: + - supernord +support_channel: ro-crate +cover: + link: rocrate.png + alt: RO Crate logo + + +# Discuss with Maria what to do with these +# add bioconductor tutorials as external tutos? 
+bioconductor/setup: +title: "Getting set up for the Bioconductor Sm\xF6rg\xE5sbord tutorials" +description: This video will help you get set up on the Bioconductor Galaxy to run + the tutorials in this module. +type: Demo +support_channel: bioconductor +tags: +- bioconductor +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +materials: +- link: https://htmlpreview.github.io/?https://github.com/Bioconductor/workshop-contributions/blob/main/welcome.html + title: Go to the Bioconductor Galaxy +versions: +- link: 2ErhMCctZaY + length: 5M + date: '2023-05-05' + speakers: + - almahmoud + captions: + - bioc + +bioconductor/bioc-intro/tutorial: +title: Introduction to data analysis with R and Bioconductor +type: Tutorial +instructors: +- lgatto +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +description: 'The Data science lesson is based on the Carpentries Ecology Curriculum. + + There are no pre-requisites for this module, and the materials assume no prior + + knowledge about R and Bioconductor. It introduces R and RStudio, teaches data + + cleaning, management, analysis, and visualisation, and introduces some + + Bioconductor concepts. + + ' +materials: +- link: https://carpentries-incubator.github.io/bioc-intro/ + title: Introduction to data analysis with R and Bioconductor + external: true +tags: +- bioconductor +- r +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/bioc-project/tutorial: +title: The Bioconductor project +type: Tutorial +instructors: +- kevinrue +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +description: 'The Bioconductor project lesson provides an introduction to the Bioconductor + + project, covering the Bioconductor home page, packages, package landing pages, + + and package vignettes, where to find help, Bioconductor workflows, the + + Bioconductor release schedule and versions, some core infrastructure, and more. + + It is meant to be used in combination with other modules as part of a wider + + workshop. + + ' +materials: +- link: https://carpentries-incubator.github.io/bioc-project/ + title: The Bioconductor project +tags: +- bioconductor +- r +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/rformassspectrometry/tutorial: +title: R for Mass Spectrometry +type: Tutorial +instructors: +- lgatto +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +description: 'This material introduces participants to the analysis and exploration + of mass + + spectrometry (MS) based proteomics data using R and Bioconductor. The course + + will cover all levels of MS data, from raw data to identification and + + quantitation data, up to the statistical interpretation of a typical shotgun + + MS experiment, and will focus on hands-on tutorials. At the end of this course, + + the participants will be able to manipulate MS data in R and use existing + + packages for their exploratory and statistical proteomics data analysis.
+ + ' +materials: +- link: https://rformassspectrometry.github.io/docs/ + title: R for Mass Spectrometry +tags: +- bioconductor +- r +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/isee/tutorial: +description: 'This workshop demonstrates the use of the iSEE package to create + + and configure interactive applications for the exploration of various types of + + genomics data sets (e.g., bulk and single-cell RNA-seq, CyTOF, gene expression + + microarray). This workshop is presented as a lab session: an instructor-led + + live demo, followed by hands-on experimentation guided by completely worked + + examples and stand-alone notes that participants may continue to use after + + the workshop. + + + The instructor-led live demo comprises three parts: + + 1. Brief lecture on the package concept and functionality + + 2. Overview of the graphical user interface + + 3. Instructions to preconfigure iSEE apps + + + The hands-on lab comprises three parts: + + 1. Inspection of single-cell RNA-seq data at various steps of a typical + + computational workflow, including quality control and dimensionality reduction + + 2. Addition of custom panels to the user interface for advanced visualization + + 3. Additional questions from the participants, including individual use cases + + and suggestions for future developments + + ' +materials: +- link: https://isee.github.io/iSEEWorkshop2020/ + title: Interactive visualization of SummarizedExperiment objects with iSEE +tags: +- bioconductor +- r +title: Interactive visualization of SummarizedExperiment objects with iSEE +type: Tutorial +versions: +- captions: null + date: '2020-07-28' + length: 55M + link: qmoJtL8b438 + speakers: + - csoneson + - kevinrue + - federicomarini +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/epiforbioworkshop2022/tutorial: +description: 'Concepts of causal inference in epidemiology have important ramifications + + for studies across bioinformatics and other fields of health research. In + + this workshop, we introduce basic concepts of epidemiology, study design, + + and causal inference for bioinformaticians. Emphasis is placed on addressing + + bias and confounding as common threats to assessing a causal pathway in a + + variety of study design types and when using common forms of analyses such + + as GWAS and survival analysis. Workshop participants will have the + + opportunity to create their own structural causal models (DAGs) using + + dagitty and ggdag and then use this model to determine how to assess an + + estimated causal effect. Examples using DESeq2, edgeR, and limma will be + + used to show how multivariable models can be fitted depending on the + + hypothesized causal relationship. Presented successfully at BioC2021 to a + + large audience of more than 100, this workshop updates that material by + + revising current examples based on participant feedback as well as content updates.
+ + ' +materials: +- link: https://chloemirzayi.com/epiforbioworkshop2022/articles/EpiForBioWorkshop.html + title: Epidemiology for Bioinformaticians +tags: +- bioconductor +- r +title: Epidemiology for Bioinformaticians +type: Tutorial +versions: +- captions: null + date: '2022-07-27' + length: 83M + link: u2ahgX823Fg + speakers: + - cmirzayi +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/bioc2022_tidytranscriptomics/tutorial: +description: 'This tutorial will showcase analysis of single-cell RNA sequencing data + + following the tidy data paradigm. The tidy data paradigm provides a standard + + way to organise data values within a dataset, where each variable is a column, + + each observation is a row, and data is manipulated using an easy-to-understand + + vocabulary. Most importantly, the data structure remains consistent across + + manipulation and analysis functions. + + + This can be achieved with the integration of packages present in the R CRAN + + and Bioconductor ecosystem, including tidySingleCellExperiment and tidyverse. + + These packages are part of the tidytranscriptomics suite that introduces a + + tidy approach to RNA sequencing data representation and analysis. For more + + information see the tidy transcriptomics blog. + + ' +materials: +- link: https://tidytranscriptomics-workshops.github.io/bioc2022_tidytranscriptomics/articles/tidytranscriptomics_case_study.html + title: Tidy Transcriptomics For Single-Cell RNA Sequencing Analyses +tags: +- bioconductor +- r +title: Tidy Transcriptomics For Single-Cell RNA Sequencing Analyses +type: Tutorial +versions: +- captions: null + date: '2022-07-28' + length: 1H27M + link: LKWcut6l--c + speakers: + - stemangiola +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/CompGenomicsBioc2022/tutorial: +description: 'This workshop consists of a demonstration of using DECIPHER and SynExtend + for + + common analyses in comparative genomics. The immediate goal of this session + + is to use sequence data to uncover networks of functionally associated genes. + + These networks consist of genetic regions under shared evolutionary pressure, + + which have previously been shown to imply some degree of conserved function. + + ' +materials: +- link: https://www.ahl27.com/CompGenomicsBioc2022/index.html + title: Comparative Genomics with DECIPHER and SynExtend +tags: +- bioconductor +- r +title: Comparative Genomics with DECIPHER and SynExtend +type: Tutorial +versions: +- captions: null + date: '2022-07-29' + length: 1H5M + link: JgTc2jiMPF0 + speakers: + - ahl27 +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/SOOBiocWorkshop/tutorial: +title: SpatialOmicsOverlay Workshop +type: Tutorial +instructors: +- maddygriz +- mgrout81 +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +description: "This workshop will introduce users to the NanoString R package\nSpatialOmicsOverlay. + This package is designed for use in interpretation and\npresentation of the multi-form + data generated by NanoString's GeoMx \xAE\nDigital Spatial Profiler spatial biology + platform. The GeoMx DSP produces\nboth rich imaging and genomics data. Integrating + these data types\nfacilitates a deep understanding of the profiled tissue. 
The\nSpatialOmicsOverlay + package integrates both of these data types, thereby\nmaintaining the relationship + between underlying tissue morphology and\nresultant gene expression.\n\nSpecifically, + SpatialOmicsOverlay was developed to visualize and analyze the\nfree-handed nature + of Region of Interest (ROI) selection in a GeoMx\nexperiment, as well as the immunofluorescence-guided + segmentation process.\nThe overlay from the instrument is recreated in the R environment, + which\nallows for plotting overlays with data like ROI type or gene expression. + The\npackage provides a convenient workflow for users to generate customized,\nsharable + visualizations.\n\nThe Introduction to SpatialOmicsOverlay vignette demonstrates + how to use\nOME-TIFF files, which are exported from the GeoMx platform. Participants\nwill + learn how to interact with this file type and generate informative\nplots over images. + This vignette utilizes data from our Spatial Organ Atlas.\nThe Spatial Organ Atlas + is a freely accessible resource of whole\ntranscriptome spatial profiles of functional + components of tissues from\nhuman and mouse generated using our Whole Transcriptome + Atlas RNA assay. In\nparticular, vignette users will be analyzing data from the + mouse brain. This\ncontent is similar to the vignette available with the package + upon\ninstallation.\n" +materials: +- link: https://nanostring-biostats.github.io/SOOBiocWorkshop/articles/Introduction-to-SOO.html + title: Introduction to SpatialOmicsOverlay + external: true +tags: +- bioconductor +- r +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/fluentGenomics/tutorial: +title: 'fluentGenomics: A plyranges and tximeta workflow' +type: Tutorial +instructors: +- mikelove +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +description: 'An extended workflow using the plyranges and tximeta packages for fluent + + genomic data analysis. Use tximeta to correctly import RNA-seq transcript + + quantifications and summarize them to gene counts for downstream analysis. + + Use plyranges for clearly expressing operations over genomic coordinates and + + to combine results from differential expression and differential accessibility + + analyses. + + ' +materials: +- link: https://bit.ly/fluentGenomics + title: 'fluentGenomics: A plyranges and tximeta workflow' +tags: +- bioconductor +- r +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/MungeSumstats/tutorial: +title: 'MungeSumstats: Standardise the format of GWAS summary statistics' +type: Tutorial +instructors: +- Al-Murphy +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +description: 'The package is designed to handle the lack of standardisation of output + + files by the GWAS community. The MRC IEU Open GWAS team have provided full + + summary statistics for >10k GWAS, which are API-accessible via the ieugwasr + + and gwasvcf packages. But these GWAS are only standardised in the sense that + + they are in VCF format, and can be fully standardised with MungeSumstats. + + + MungeSumstats provides a framework to standardise the format for any GWAS + + summary statistics, including those in VCF format, enabling downstream + + integration and analysis. It addresses the most common discrepancies across + + summary statistic files, and offers a range of adjustable Quality Control + + (QC) steps.
+ + ' +materials: +- link: https://neurogenomics.github.io/MungeSumstats/articles/MungeSumstats.html + title: 'MungeSumstats: Standardise the format of GWAS summary statistics' +tags: +- bioconductor +- r +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/RNAseq123/tutorial: +title: RNA-seq analysis is easy as 1-2-3 with limma, Glimma and edgeR +type: Tutorial +instructors: +- mritchie +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +description: 'The ability to easily and efficiently analyse RNA-sequencing data is + a key + + strength of the Bioconductor project. Starting with counts summarised at the + + gene level, a typical analysis involves pre-processing, exploratory data + + analysis, differential expression testing and pathway analysis with the + + results obtained informing future experiments and validation studies. In this + + workflow article, we analyse RNA-sequencing data from the mouse mammary gland, + + demonstrating use of the popular edgeR package to import, organise, filter + + and normalise the data, followed by the limma package with its voom method, + + linear modelling and empirical Bayes moderation to assess differential + + expression and perform gene set testing. This pipeline is further enhanced + + by the Glimma package which enables interactive exploration of the results + + so that individual samples and genes can be examined by the user. The complete + + analysis offered by these three packages highlights the ease with which + + researchers can turn the raw counts from an RNA-sequencing experiment into + + biological insights using Bioconductor. + + ' +materials: +- link: https://bit.ly/RNAseq123 + title: RNA-seq analysis is easy as 1-2-3 with limma, Glimma and edgeR +tags: +- bioconductor +- r +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/RnaSeqGeneEdgeRQL/tutorial: +title: 'From reads to genes to pathways: differential expression analysis of RNA-Seq + experiments using Rsubread and the edgeR quasi-likelihood pipeline' +type: Tutorial +instructors: +- yunshun +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +description: 'RNA sequencing (RNA-seq) has become a very widely used technology for + + profiling gene expression. One of the most common aims of RNA-seq profiling + + is to identify genes or molecular pathways that are differentially expressed + + (DE) between two or more biological conditions. This article demonstrates a + + computational workflow for the detection of DE genes and pathways from RNA-seq + + data by providing a complete analysis of an RNA-seq experiment profiling + + epithelial cell subsets in the mouse mammary gland. The workflow uses R + + software packages from the open-source Bioconductor project and covers all + + steps of the analysis pipeline, including alignment of read sequences, data + + exploration, differential expression analysis, visualization and pathway + + analysis. Read alignment and count quantification are conducted using the + + Rsubread package and the statistical analyses are performed using the edgeR + + package. The differential expression analysis uses the quasi-likelihood + + functionality of edgeR.
+ + ' +materials: +- link: https://bit.ly/RnaSeqGeneEdgeRQL + title: 'From reads to genes to pathways: RNA-Seq using Rsubread and edgeR' +tags: +- bioconductor +- r +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + +bioconductor/SingscoreAMLMutations/tutorial: +title: Using singscore to predict mutations in AML from transcriptomic signatures +type: Tutorial +instructors: +- bhuvad +cover: + link: bioconductor-session.jpg + alt: Bioconductor logo +description: 'Advances in RNA sequencing (RNA-seq) technologies that measure the + + transcriptome of biological samples have revolutionised our ability to + + understand transcriptional regulatory programs that underpin diseases + + such as cancer. We recently published singscore - a single-sample, rank-based + + gene set scoring method which quantifies how concordant the transcriptional + + profiles of individual samples are relative to specific gene sets of interest. + + Here we demonstrate the application of singscore to investigate + + transcriptional profiles associated with specific mutations or genetic lesions + + in acute myeloid leukemia. Using matched genomic and transcriptomic data + + available through The Cancer Genome Atlas, we show that scoring of appropriate + + signatures can distinguish samples with corresponding mutations, reflecting + + the ability of these mutations to drive aberrant transcriptional programs + + involved in leukemogenesis. We believe the singscore method is particularly + + useful for studying heterogeneity within specific subsets of cancers, and as + + demonstrated, singscore has the ability to identify samples where alternative + + mutations/genetic lesions appear to drive transcriptional programs. + + ' +materials: +- link: https://bit.ly/SingscoreAMLMutations + title: Using singscore to predict mutations in AML from transcriptomic signatures +tags: +- bioconductor +- r +support_channel: bioconductor +video_support_channels: +- https://support.bioconductor.org + + + + + + +# tb webinars +# associate with event?
+webinar/tb-drug-resistance: +title: 'Webinar: Drug resistance prediction' +description: Principles of drug resistance detection from genomic data +type: Webinar +support_channel: general +tags: +- tuberculosis +versions: +- link: Ddwt-_gQR2M + length: 20M + external: true + speakers: + - GaloGS + captions: [] + date: '2023-01-01' + +webinar/tb-clustering: +title: 'Webinar: The Concept of Clustering' +description: Main aspects of clustering analysis to infer transmission in MTBC +type: Webinar +tags: +- tuberculosis +support_channel: general +versions: +- link: l4cPUECJ7VU + length: 15M + external: true + speakers: + - GaloGS + captions: [] + date: '2023-01-01' + +webinar/tb-phylogenetic-mutations: +title: 'Webinar: Phylogenetic mutations' +description: This video will introduce one special type of mutation to take into + account when studying drug resistance patterns +type: Webinar +tags: +- tuberculosis +versions: +- link: 1ps_o5rpnmw + length: 15M + speakers: + - GaloGS + external: true + captions: null + date: '2023-01-01' + +webinar/tb-genetic-distance: +title: 'Webinar: Genetic distance thresholds' +description: Clustering as an approximation to infer transmission +type: Webinar +tags: +- tuberculosis +versions: +- link: kKNgmpy1N94 + length: 15M + speakers: + - GaloGS + captions: [] + external: true + date: '2023-01-01' diff --git a/news/_posts/2021-11-23-video-library.md b/news/_posts/2021-11-23-video-library.md index 1b8329e63dd659..518e81f575fd28 100644 --- a/news/_posts/2021-11-23-video-library.md +++ b/news/_posts/2021-11-23-video-library.md @@ -2,6 +2,7 @@ title: "New Feature: GTN Video Library" contributions: authorship: [shiltemann, hexylena] + infrastructure: [shiltemann, hexylena] tags: [gtn infrastructure, new feature] cover: news/images/gtn-videolib-stats.png coveralt: Screenshot of the GTN Video Library Home page diff --git a/news/_posts/2024-06-14-gtn-video-library.md b/news/_posts/2024-06-14-gtn-video-library.md new file mode 100644 index 00000000000000..6200be2276361b --- /dev/null +++ b/news/_posts/2024-06-14-gtn-video-library.md @@ -0,0 +1,48 @@ +--- +title: "GTN Video Library 2.0: 107 hours of learning across 154 videos" +layout: news +tags: + - gtn + +cover: "assets/images/video-library.png" +coveralt: Screenshot of the GTN Video Library showing a tutorial recording with a large YouTube player and extensive metadata about who created the video (Natalie Kucher) and when, how long, etc. +contributions: + authorship: + - shiltemann + - hexylena + infrastructure: + - shiltemann + - hexylena + funding: + - gallantries +--- + +Many GTN tutorials already have recordings. These recordings were made by members of the community for a variety of (online) training events. +Up until now, this video library was part of the [Gallantries Project](https://gallantries.github.io/). +We have now integrated this video library directly into the GTN, and made it even **easier to add video recordings** to GTN tutorials or slide decks! Just use a Google Form to submit your video recordings! +
+Want to record one of our tutorials? **We welcome video recordings from everybody!**[^1] +
+Please check out our FAQs with instructions for adding your own recordings: + + + + +
+## What this means for... +
+Group | What's new? +--- | --- +Instructors | Just fill out an updated Google Form and we'll take care of the rest. The Google Form is now more easily discoverable, as are the recording instructions, which provide a best-practice guide for doing screen recordings of tutorials.
Our updated documentation on recording best practices makes these easy to follow. +Event Organisers & Teachers | Easily discover our large library of both human and automated video recordings to use in your training materials. All videos are licensed CC-BY, so you can reuse them in your own training materials or even embed them in your course websites (Moodle, Blackboard, etc.). +GTN Editors & Maintainers | When a recording is submitted, a pull request will be opened with the appropriate metadata in the correct location, along with an easy checklist to follow so that everyone's video is processed efficiently and no step is missed. +{: .table.table-striped} + +## Video Library 1.0 + +This work is based on the work done during the @gtn:gallantries project and the [Video Library 1.0]({{ site.baseurl }}/news/2021/11/23/video-library.html). As that project ended, the features developed during that time have now been rolled into the GTN to ensure their long-term sustainability. + +The number of videos and hours is slightly lower in the GTN version as we have not yet imported every recording; currently we are missing some recordings that are not directly associated with a GTN material (e.g. webinars). + + +[^1]: Submissions must be recordings of a GTN tutorial being taught. We are not accepting recordings of other training events at this time. diff --git a/shared/images/hopkins.png b/shared/images/hopkins.png new file mode 100644 index 00000000000000..283329ebf1019e Binary files /dev/null and b/shared/images/hopkins.png differ diff --git a/shared/images/inab-certh.png b/shared/images/inab-certh.png new file mode 100644 index 00000000000000..a9846ddbe02db2 Binary files /dev/null and b/shared/images/inab-certh.png differ diff --git a/shared/images/minnesotauni.png b/shared/images/minnesotauni.png new file mode 100644 index 00000000000000..1cd3bd257f7e4d Binary files /dev/null and b/shared/images/minnesotauni.png differ diff --git a/shared/images/ncbi.png b/shared/images/ncbi.png new file mode 100644 index 00000000000000..db0a772204d846 Binary files /dev/null and b/shared/images/ncbi.png differ diff --git a/shared/images/petermac.png b/shared/images/petermac.png new file mode 100644 index 00000000000000..df1c260bbb95d0 Binary files /dev/null and b/shared/images/petermac.png differ diff --git a/shared/images/sciensano.jpg b/shared/images/sciensano.jpg new file mode 100644 index 00000000000000..69def5a83f9211 Binary files /dev/null and b/shared/images/sciensano.jpg differ diff --git a/topics/admin/tutorials/ansible-galaxy/tutorial.md b/topics/admin/tutorials/ansible-galaxy/tutorial.md index ab277fa2e1ff2b..bae0fe55ff1b18 100644 --- a/topics/admin/tutorials/ansible-galaxy/tutorial.md +++ b/topics/admin/tutorials/ansible-galaxy/tutorial.md @@ -50,6 +50,37 @@ edam_ontology: - topic_3489 # Database Management - topic_0605 # Informatics - topic_3071 # Data Management + +recordings: +- captioners: + - natefoo + - hexylena + date: '2022-03-01' + galaxy_version: '21.05' + length: 2H50M + youtube_id: LPK8rP_qUiA + speakers: + - natefoo + - hexylena +- captioners: + - natefoo + - hexylena + date: '2021-06-28' + galaxy_version: '21.05' + length: 2H47M + youtube_id: zT70luZqPOU + speakers: + - natefoo +- captioners: + - shiltemann + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 53M + youtube_id: il83uApg7Hc + speakers: + - hexylena + --- This tutorial assumes you have some familiarity with [Ansible](https://www.ansible.com/resources/get-started) and are comfortable with writing and running
playbooks. If not, please consider following our [Ansible Tutorial]({% link topics/admin/tutorials/ansible/tutorial.md %}) first. diff --git a/topics/admin/tutorials/ansible/slides.html b/topics/admin/tutorials/ansible/slides.html index a1ce0f7298b358..5d2067e9d9a9b4 100644 --- a/topics/admin/tutorials/ansible/slides.html +++ b/topics/admin/tutorials/ansible/slides.html @@ -18,6 +18,17 @@ - Many system administration, software installation, and software management tasks are already available as Ansible tasks or roles contributors: - hexylena + +recordings: +- captioners: + - martenson + date: '2021-02-15' + galaxy_version: '21.01' + length: 5M + youtube_id: KFpbfmN0OTE + speakers: + - jdavcs + --- ## Configuration Management diff --git a/topics/admin/tutorials/ansible/tutorial.md b/topics/admin/tutorials/ansible/tutorial.md index dd652b5a3563b4..86d3f523bc85b4 100644 --- a/topics/admin/tutorials/ansible/tutorial.md +++ b/topics/admin/tutorials/ansible/tutorial.md @@ -26,6 +26,17 @@ subtopic: core tags: - ansible + +recordings: +- captioners: + - martenson + date: '2021-02-15' + galaxy_version: '21.01' + length: 61M + youtube_id: 2KdT0sYKUeE + speakers: + - martenson + --- # Overview diff --git a/topics/admin/tutorials/apptainer/tutorial.md b/topics/admin/tutorials/apptainer/tutorial.md index e7c5fb1727c7af..b6a3eeeb63f9fe 100644 --- a/topics/admin/tutorials/apptainer/tutorial.md +++ b/topics/admin/tutorials/apptainer/tutorial.md @@ -27,6 +27,27 @@ requirements: tutorials: - ansible - ansible-galaxy + +recordings: +- captioners: + - gallardoalba + - slugger70 + date: '2021-06-28' + galaxy_version: '21.05' + length: 43M + youtube_id: q6Dt7j713tI + speakers: + - slugger70 +- captioners: + - hexylena + - cat-bro + date: '2021-02-15' + galaxy_version: '21.01' + length: 16M + youtube_id: airzg4-ETEs + speakers: + - hexylena + --- In this tutorial you will learn how to configure Galaxy to run jobs using [Apptainer](https://apptainer.org) containers provided by the [BioContainers](https://biocontainers.pro/) community. diff --git a/topics/admin/tutorials/connect-to-compute-cluster/tutorial.md b/topics/admin/tutorials/connect-to-compute-cluster/tutorial.md index 375c5a175d5890..0113604f2c491a 100644 --- a/topics/admin/tutorials/connect-to-compute-cluster/tutorial.md +++ b/topics/admin/tutorials/connect-to-compute-cluster/tutorial.md @@ -39,6 +39,26 @@ follow_up_training: topic_name: admin tutorials: - job-destinations + +recordings: +- captioners: + - cat-bro + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 31M + youtube_id: R0NbHscL3jA + speakers: + - hexylena +- captioners: + - beatrizserrano + date: '2021-02-15' + galaxy_version: '21.01' + length: 6M + youtube_id: 7CYI5yw9MN8 + speakers: + - hexylena + --- The tools that are added to Galaxy can vary widely in the compute resources that they require to work efficiently. @@ -128,7 +148,7 @@ be taken into consideration when choosing where to run jobs and what parameters > @@ -194,6 +194,16 @@ nginx_ssl_role: usegalaxy_eu.certbot > nginx_conf_ssl_certificate: /etc/ssl/certs/fullchain.pem > nginx_conf_ssl_certificate_key: /etc/ssl/user/privkey-www-data.pem -> +> > +# Slurm > +slurm_roles: ['controller', 'exec'] # Which roles should the machine play? exec are execution hosts.
> +slurm_nodes: diff --git a/topics/admin/tutorials/cvmfs/slides.html b/topics/admin/tutorials/cvmfs/slides.html index b8103c51b15290..f6e67fe7d897c6 100644 --- a/topics/admin/tutorials/cvmfs/slides.html +++ b/topics/admin/tutorials/cvmfs/slides.html @@ -14,6 +14,17 @@ - slugger70 - hexylena + +recordings: +- captioners: + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 3M + youtube_id: g_cavAO-fBM + speakers: + - awspolly + --- # Built in Data diff --git a/topics/admin/tutorials/cvmfs/tutorial.md b/topics/admin/tutorials/cvmfs/tutorial.md index c95e304534d252..57c2d3a88b1adf 100644 --- a/topics/admin/tutorials/cvmfs/tutorial.md +++ b/topics/admin/tutorials/cvmfs/tutorial.md @@ -28,6 +28,17 @@ subtopic: data tags: - ansible - git-gat + +recordings: +- captioners: + - slugger70 + date: '2021-02-15' + galaxy_version: '21.01' + length: 23M + youtube_id: X3iFMZP_fQ8 + speakers: + - slugger70 + --- > These words come from a transcript of Simon Gladman teaching this course. He diff --git a/topics/admin/tutorials/data-library/tutorial.md b/topics/admin/tutorials/data-library/tutorial.md index 5d17f2b6fc1898..07897f412ad2c5 100644 --- a/topics/admin/tutorials/data-library/tutorial.md +++ b/topics/admin/tutorials/data-library/tutorial.md @@ -29,6 +29,18 @@ requirements: - ansible - ansible-galaxy - tool-management + +recordings: +- captioners: + - hexylena + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 22M + youtube_id: 6BWu29Oiihw + speakers: + - shiltemann + --- Data libraries are a great way to provide structured repositories of data to diff --git a/topics/admin/tutorials/gxadmin/slides.html b/topics/admin/tutorials/gxadmin/slides.html index ee8600b8b242be..f4f674359567d0 100644 --- a/topics/admin/tutorials/gxadmin/slides.html +++ b/topics/admin/tutorials/gxadmin/slides.html @@ -19,6 +19,17 @@ - bgruening - slugger70 - hexylena + +recordings: +- captioners: + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 2M + youtube_id: QFwOgDyFSSA + speakers: + - awspolly + --- # Database Queries diff --git a/topics/admin/tutorials/interactive-tools/tutorial.md b/topics/admin/tutorials/interactive-tools/tutorial.md index fc616f0e836aa0..91696ed5a58304 100644 --- a/topics/admin/tutorials/interactive-tools/tutorial.md +++ b/topics/admin/tutorials/interactive-tools/tutorial.md @@ -34,6 +34,18 @@ requirements: - connect-to-compute-cluster - job-destinations subtopic: features + +recordings: +- captioners: + - abretaud + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 44M + youtube_id: lACsIhnbTbE + speakers: + - abretaud + --- diff --git a/topics/admin/tutorials/job-destinations/tutorial.md b/topics/admin/tutorials/job-destinations/tutorial.md index 236a4972cf1fd1..684db0e567ba1b 100644 --- a/topics/admin/tutorials/job-destinations/tutorial.md +++ b/topics/admin/tutorials/job-destinations/tutorial.md @@ -41,6 +41,17 @@ requirements: - connect-to-compute-cluster abbreviations: TPV: Total Perspective Vortex + +recordings: +- captioners: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 31M + youtube_id: qX8GjTJwnAk + speakers: + - hexylena + --- diff --git a/topics/admin/tutorials/monitoring/slides.html b/topics/admin/tutorials/monitoring/slides.html index 9455f6c435b6d6..912875012d595c 100644 --- a/topics/admin/tutorials/monitoring/slides.html +++ b/topics/admin/tutorials/monitoring/slides.html @@ -25,6 +25,17 @@ - bgruening - slugger70 - hexylena + +recordings: +- captioners: + - hexylena + date: 
'2021-02-15' + galaxy_version: '21.01' + length: 2M + youtube_id: qcp9lEUxCGI + speakers: + - awspolly + --- # Telegraf, InfluxDB, and Grafana diff --git a/topics/admin/tutorials/monitoring/tutorial.md b/topics/admin/tutorials/monitoring/tutorial.md index 53146d8196c086..1a8fdc02e49a85 100644 --- a/topics/admin/tutorials/monitoring/tutorial.md +++ b/topics/admin/tutorials/monitoring/tutorial.md @@ -35,6 +35,17 @@ requirements: - gxadmin abbreviations: TSDB: Time Series Database + +recordings: +- captioners: + - shiltemann + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 73M + youtube_id: drUaYQtMBLY + speakers: [] + --- diff --git a/topics/admin/tutorials/object-store/slides.html b/topics/admin/tutorials/object-store/slides.html index b4cb43de259fde..58383054be870d 100644 --- a/topics/admin/tutorials/object-store/slides.html +++ b/topics/admin/tutorials/object-store/slides.html @@ -17,6 +17,16 @@ - hexylena subtopic: data +recordings: +- captioners: + - cat-bro + date: '2021-02-15' + galaxy_version: '21.01' + length: 18M + youtube_id: Hv2bvjk5sjE + speakers: + - gmauro + --- # Object Store Plugins diff --git a/topics/admin/tutorials/pulsar/slides.html b/topics/admin/tutorials/pulsar/slides.html index 5f61f5365d1f10..08200fb056691a 100644 --- a/topics/admin/tutorials/pulsar/slides.html +++ b/topics/admin/tutorials/pulsar/slides.html @@ -32,6 +32,17 @@ - ansible-galaxy - title: "A server/VM on which to deploy Pulsar" type: "none" + +recordings: +- captioners: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 14M + youtube_id: M1-Z_2tuQPI + speakers: + - slugger70 + --- # What are heterogeneous compute resources? diff --git a/topics/admin/tutorials/pulsar/tutorial.md b/topics/admin/tutorials/pulsar/tutorial.md index 6f85e6de0772be..978a862fa2256c 100644 --- a/topics/admin/tutorials/pulsar/tutorial.md +++ b/topics/admin/tutorials/pulsar/tutorial.md @@ -39,6 +39,26 @@ requirements: - cvmfs - title: "A server/VM on which to deploy Pulsar" type: "none" + +recordings: +- captioners: + - hexylena + date: '2021-06-28' + galaxy_version: '21.05' + length: 1H28M + youtube_id: f0QdF8NDhsM + speakers: + - slugger70 +- captioners: + - simonbray + - slugger70 + date: '2021-02-15' + galaxy_version: '21.01' + length: 57M + youtube_id: a7fKJT4Fs9k + speakers: + - slugger70 + --- diff --git a/topics/admin/tutorials/tiaas/tutorial.md b/topics/admin/tutorials/tiaas/tutorial.md index 8d6acb5e46c6b8..22cbc7b7cd5d9e 100644 --- a/topics/admin/tutorials/tiaas/tutorial.md +++ b/topics/admin/tutorials/tiaas/tutorial.md @@ -32,6 +32,18 @@ requirements: - pulsar abbreviations: TIaaS: Training Infrastructure as a Service + +recordings: +- captioners: + - hexylena + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 24M + youtube_id: tz0ZbK_8Vcc + speakers: + - hexylena + --- Galaxy is widely used for teaching. In order to facilitate instructors, the Galaxy Project has developed {TIaaS}.
diff --git a/topics/admin/tutorials/tool-management/slides.html b/topics/admin/tutorials/tool-management/slides.html index c24341cc410126..32d3a0e3c616ff 100644 --- a/topics/admin/tutorials/tool-management/slides.html +++ b/topics/admin/tutorials/tool-management/slides.html @@ -23,6 +23,17 @@ - slugger70 - hexylena - nsoranzo + +recordings: +- captioners: + - cat-bro + date: '2021-06-28' + galaxy_version: '21.05' + length: 17M + youtube_id: 8Rdho_eUeKc + speakers: + - cat-bro + --- ## Galaxy tools diff --git a/topics/admin/tutorials/tool-management/tutorial.md b/topics/admin/tutorials/tool-management/tutorial.md index 9bcd0296deaf41..9a3e749f4c125d 100644 --- a/topics/admin/tutorials/tool-management/tutorial.md +++ b/topics/admin/tutorials/tool-management/tutorial.md @@ -22,6 +22,25 @@ subtopic: features tags: - tools - git-gat + +recordings: +- captioners: + - cat-bro + date: '2021-06-28' + galaxy_version: '21.05' + length: 27M + youtube_id: pda0v9b3vO4 + speakers: + - cat-bro +- captioners: + - eancelet + date: '2021-02-15' + galaxy_version: '21.01' + length: 47M + youtube_id: 7Qqwrzn--YI + speakers: + - cat-bro + --- This tutorial will introduce you to one of Galaxy's associated projects - [Ephemeris](https://ephemeris.readthedocs.io/). Ephemeris is a small Python library and set of scripts for managing the bootstrapping of Galaxy plugins - tools, index data, and workflows. It aims to help automate and limit the quantity of manual actions admins have to do in order to maintain a Galaxy instance. diff --git a/topics/admin/tutorials/tus/tutorial.md b/topics/admin/tutorials/tus/tutorial.md index 30cec6b5ae6e7b..daf2eaa632444e 100644 --- a/topics/admin/tutorials/tus/tutorial.md +++ b/topics/admin/tutorials/tus/tutorial.md @@ -21,6 +21,17 @@ requirements: tutorials: - ansible - ansible-galaxy + +recordings: +- captioners: + - hexylena + date: '2022-03-11' + galaxy_version: '22.01' + length: 7M + youtube_id: v5sbIqF_0qo + speakers: + - hexylena + --- Here you'll learn to set up [TUS](https://tus.io/), an open source resumable file upload server, to process uploads for Galaxy. We use an external process here to offload work from the main Galaxy processes, freeing them for more important tasks and avoiding impact on the entire system during periods of heavy uploading.
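For reference, the shape of the `recordings:` front-matter block that this PR adds to each tutorial and slide deck is sketched below. The field names are taken from the additions in this diff; the values are placeholders, not a real recording:

```yaml
recordings:
- captioners:             # GTN contributor IDs of those who wrote or fixed the captions
  - some-contributor
  speakers:               # GTN contributor IDs of the presenters
  - some-contributor
  date: '2021-02-15'      # recording date, quoted ISO format
  galaxy_version: '21.01' # Galaxy release shown in the video
  length: 31M             # duration, e.g. 5M, 53M, 1H28M
  youtube_id: XXXXXXXXXXX # placeholder for the 11-character YouTube video ID
```

When a material has been recorded more than once, entries are appended to the same list with the most recent recording first, as in the `ansible-galaxy` and `pulsar` tutorials in this diff.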
diff --git a/topics/admin/tutorials/upgrading/tutorial.md b/topics/admin/tutorials/upgrading/tutorial.md index 88eed7859ddc53..f0b355e8625218 100644 --- a/topics/admin/tutorials/upgrading/tutorial.md +++ b/topics/admin/tutorials/upgrading/tutorial.md @@ -29,6 +29,18 @@ requirements: tutorials: - ansible - ansible-galaxy + +recordings: +- captioners: + - slugger70 + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 36M + youtube_id: OnL47MvXjoo + speakers: + - slugger70 + --- # Overview diff --git a/topics/admin/tutorials/users-groups-quotas/slides.html b/topics/admin/tutorials/users-groups-quotas/slides.html index a3c28d1d0b9bfd..dce7d3607d2d43 100644 --- a/topics/admin/tutorials/users-groups-quotas/slides.html +++ b/topics/admin/tutorials/users-groups-quotas/slides.html @@ -25,6 +25,17 @@ - hexylena subtopic: maintenance + +recordings: +- captioners: + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 4M + youtube_id: crywu31L8qg + speakers: + - awspolly + --- # Users diff --git a/topics/assembly/tutorials/assembly-with-preprocessing/tutorial.md b/topics/assembly/tutorials/assembly-with-preprocessing/tutorial.md index ea4a2c55726e07..495f2a2b281796 100644 --- a/topics/assembly/tutorials/assembly-with-preprocessing/tutorial.md +++ b/topics/assembly/tutorials/assembly-with-preprocessing/tutorial.md @@ -38,6 +38,16 @@ tags: contributors: - wm75 +recordings: +- captioners: + - mtekman + date: '2021-02-15' + galaxy_version: '21.01' + length: 25M + youtube_id: jNFLYhjgJPs + speakers: + - gallardoalba + --- @@ -162,7 +172,7 @@ steps are independent of the data source you choose. > > A name tag will automatically propagate to any new dataset derived > > from the tagged dataset. > {: .comment} -> +> > You can create a name tag by attaching a tag starting with `#` to any > dataset. > @@ -313,7 +323,7 @@ steps are independent of the data source you choose. > - Click **Apply** > - *"Type"*: `fastqsanger.gz` > - *"Name"*: `Nanopore data` (or similar) -> - *"Add nametag for name:"* {% icon param-check %} +> - *"Add nametag for name:"* {% icon param-check %} > - Click **Upload** > {: .hands_on} diff --git a/topics/assembly/tutorials/chloroplast-assembly/tutorial.md b/topics/assembly/tutorials/chloroplast-assembly/tutorial.md index 9cef9b57a82474..66b3877fdc24a4 100644 --- a/topics/assembly/tutorials/chloroplast-assembly/tutorial.md +++ b/topics/assembly/tutorials/chloroplast-assembly/tutorial.md @@ -23,6 +23,17 @@ key_points: - "We can view an assembly, its mapped reads, and its annotations in JBrowse" contributors: - annasyme + +recordings: +- captioners: + - annasyme + date: '2021-02-15' + galaxy_version: '21.01' + length: 35M + youtube_id: eyfCyINTD_E + speakers: + - annasyme + --- # Introduction diff --git a/topics/assembly/tutorials/general-introduction/slides.html b/topics/assembly/tutorials/general-introduction/slides.html index 0ff845f683f12d..ee9360af6a856d 100644 --- a/topics/assembly/tutorials/general-introduction/slides.html +++ b/topics/assembly/tutorials/general-introduction/slides.html @@ -17,6 +17,18 @@ - "It looks as though there are some exploitable patterns in the metric data vs the k-mer size." 
contributors: - slugger70 + +recordings: +- captioners: + - shiltemann + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 25M + youtube_id: -EX_G1griZE + speakers: + - slugger70 + --- .enlarge120[ diff --git a/topics/assembly/tutorials/largegenome/tutorial.md b/topics/assembly/tutorials/largegenome/tutorial.md index 82030a661e498d..3da0077855c641 100644 --- a/topics/assembly/tutorials/largegenome/tutorial.md +++ b/topics/assembly/tutorials/largegenome/tutorial.md @@ -21,6 +21,17 @@ key_points: - "We can assess the quality of this assembly with various tools" contributors: - annasyme + +recordings: +- youtube_id: BM70nvwWjOY + captioners: + - annasyme + speakers: + - annasyme + length: 25M + date: '2023-05-16' + galaxy_version: '23.01' + --- *Note: We recommend running this tutorial on either the Galaxy Europe or Galaxy Australia servers. Other servers (such as Galaxy main) have not yet been configured fully for all the tools in this analysis.* diff --git a/topics/assembly/tutorials/vgp_genome_assembly/tutorial.md b/topics/assembly/tutorials/vgp_genome_assembly/tutorial.md index 039b7d8474fd30..34ee54d2ae56c8 100644 --- a/topics/assembly/tutorials/vgp_genome_assembly/tutorial.md +++ b/topics/assembly/tutorials/vgp_genome_assembly/tutorial.md @@ -41,6 +41,18 @@ abbreviations: CN: copy number ASM: assembly QV: consensus accuracy quality value + +recordings: +- captioners: + - delphine-l + date: '2022-03-13' + galaxy_version: '21.09' + length: 10M + youtube_id: 0EoBDiyd-84 + speakers: + - delphine-l + + --- @@ -128,7 +140,7 @@ The {VGP} assembly pipeline has a modular organization, consisting of ten workfl | HiFi + parental + BioNano | Properly phased with improved contiguity | G | | HiFi + parental data + Hi-C + BioNano | Properly phased with even more improved contiguity | H | -In this table, *HiFi* and *Hi-C* refer to HiFi and Hi-C data derived from the individual whose genome is being assembled. **This tutorial assumes you are assembling the genome of one individual; there are special considerations necessary for pooled data that are not covered in this tutorial.** *HiFi* and *Hi-C* are derived from the individual whose genome is being assembled. (Note: you can use Hi-C data from another individual of the same species to scaffold, but you *cannot* use that data to phase the contigs in hifiasm.) *Parental data* is high-coverage Illumina data derived from the parents of the individual being assembled, and is the key component of trio-based genome assembly. Each combination of input datasets is demonstrated in Fig. 2 by an *analysis trajectory*: a combination of workflows designed for generating the best assembly given a particular combination of inputs. These trajectories are listed in the table above and shown in the figure below. +In this table, *HiFi* and *Hi-C* refer to HiFi and Hi-C data derived from the individual whose genome is being assembled. **This tutorial assumes you are assembling the genome of one individual; there are special considerations necessary for pooled data that are not covered in this tutorial.** (Note: you can use Hi-C data from another individual of the same species to scaffold, but you *cannot* use that data to phase the contigs in hifiasm.) *Parental data* is high-coverage Illumina data derived from the parents of the individual being assembled, and is the key component of trio-based genome assembly. Each combination of input datasets is demonstrated in Fig.
2 by an *analysis trajectory*: a combination of workflows designed for generating the best assembly given a particular combination of inputs. These trajectories are listed in the table above and shown in the figure below. ![The nine workflows of Galaxy assembly pipeline](../../images/vgp_assembly/VGP_workflow_modules.svg "Eight analysis trajectories are possible depending on the combination of input data. A decision on whether or not to invoke Workflow 6 is based on the analysis of QC output of workflows 3, 4, or 5. Thicker lines connecting Workflows 7, 8, and 9 represent the fact that these workflows are invoked separately for each phased assembly (once for maternal and once for paternal).")
@@ -389,7 +401,7 @@ Now, let's analyze the *k*-mer profiles, fitted models and estimated parameters ![Genomescope plot](../../images/vgp_assembly/genomescope_plot.png "GenomeScope2 31-mer profile. The first peak located at coverage 25✕ corresponds to the heterozygous peak. The second peak at coverage 50✕, corresponds to the homozygous peak. Estimate of the heterozygous portion is 0.576%. The plot also includes information about the inferred total genome length (len), genome unique length percent ('uniq'), overall heterozygosity rate ('ab'), mean *k*-mer coverage for heterozygous bases ('kcov'), read error rate ('err'), and average rate of read duplications ('dup'). It also reports the user-given parameters of *k*-mer size ('k') and ploidy ('p')."){:width="65%"} -This distribution is the result of the Poisson process underlying the generation of sequencing reads. As we can see, the *k*-mer profile follows a bimodal distribution, indicative of a diploid genome. The distribution is consistent with the theoretical diploid model (model fit > 93%). Low frequency *k*-mers are the result of sequencing errors. GenomeScope2 estimated a haploid genome size is around 11.7 Mb, a value reasonably close to *Saccharomyces* genome size. Additionally, it revealed that the variation across the genomic sequences is 0.576%. Some of these parameters can be used later on to parameterize running `purge_dups`. This is covered in the [**solo** contiging section](#solo_hic_switch) section of the tutorial. +This distribution is the result of the Poisson process underlying the generation of sequencing reads. As we can see, the *k*-mer profile follows a bimodal distribution, indicative of a diploid genome. The distribution is consistent with the theoretical diploid model (model fit > 93%). Low frequency *k*-mers are the result of sequencing errors. GenomeScope2 estimated the haploid genome size to be around 11.7 Mb, a value reasonably close to the *Saccharomyces* genome size. Additionally, it revealed that the variation across the genomic sequences is 0.576%. Some of these parameters can be used later on to parameterize running `purge_dups`. This is covered in the [**solo** contiging section](#solo_hic_switch) of the tutorial. # Assembly with **hifiasm** @@ -535,9 +547,9 @@ Let's use gfastats to get a basic idea of what our assembly looks like. We'll ru > Scaffold N50 0 > Scaffold auN 0.00 > ``` -> +> > Because we ran `gfastats` on hap1 and hap2 outputs of `hifiasm` we need to join the two outputs together for easier interpretation: -> +> >
      > > **Step 3**: Run {% tool [Column join](toolshed.g2.bx.psu.edu/repos/iuc/collection_column_join/collection_column_join/0.0.3) %} with the following parameters: @@ -557,7 +569,7 @@ Let's use gfastats to get a basic idea of what our assembly looks like. We'll ru > # segments 17 16 > ``` > -> Now let's extract only relevant information by excluding all lines containing the word `scaffold` since there are no scaffolds at this stage of the assembly process (only contigs): +> Now let's extract only relevant information by excluding all lines containing the word `scaffold` since there are no scaffolds at this stage of the assembly process (only contigs): > >
      > @@ -574,7 +586,7 @@ Let's use gfastats to get a basic idea of what our assembly looks like. We'll ru > {: .hands_on} -Take a look at the `gfastats on hap1 and hap2 contigs` output — it has three columns: +Take a look at the `gfastats on hap1 and hap2 contigs` output — it has three columns: 1. Name of statistic 2. Value for haplotype 1 (hap1) @@ -599,7 +611,7 @@ According to the report, both assemblies are quite similar; the hap1 assembly in Next, we will use {BUSCO}, which will provide quantitative assessment of the completeness of a genome assembly in terms of expected gene content. It relies on the analysis of genes that should be present only once in a complete assembly or gene set, while allowing for rare gene duplications or losses ({% cite Simo2015 %}). > Assessing assembly completeness with BUSCO -> +> > **Step 1**: Run {% tool [Busco](toolshed.g2.bx.psu.edu/repos/iuc/busco/busco/5.5.0+galaxy0) %} with the following parameters: > 1. {% icon param-files %} *"Sequences to analyze"*: `Hap1 contigs FASTA` and `Hap2 contigs FASTA` > 2. *"Lineage data source"*: `Use cached lineage data` @@ -766,9 +778,9 @@ Let's use gfastats to get a basic idea of what our assembly looks like. We'll ru > Scaffold N50 813311 > Scaffold auN 913050.77 > ``` -> +> > Because we ran `gfastats` on Primary and Alternate outputs of `hifiasm` we need to join the two outputs together for easier interpretation: -> +> >
      > > **Step 3**: Run {% tool [Column join](toolshed.g2.bx.psu.edu/repos/iuc/collection_column_join/collection_column_join/0.0.3) %} with the following parameters: @@ -789,7 +801,7 @@ Let's use gfastats to get a basic idea of what our assembly looks like. We'll ru > # gaps 0 0 > ``` > -> Now let's extract only relevant information by excluding all lines containing the word `scaffold` since there are no scaffolds at this stage of the assembly process (only contigs): +> Now let's extract only relevant information by excluding all lines containing the word `scaffold` since there are no scaffolds at this stage of the assembly process (only contigs): > >
      > @@ -806,7 +818,7 @@ Let's use gfastats to get a basic idea of what our assembly looks like. We'll ru > {: .hands_on} -Take a look at the `gfastats on Pri and Alt contigs` output — it has three columns: +Take a look at the `gfastats on Pri and Alt contigs` output — it has three columns: 1. Name of statistic 2. Value for haplotype 1 (Pri) @@ -831,7 +843,7 @@ The report makes it clear that the two assemblies are markedly uneven: the prima Next, we will use {BUSCO}, which will provide quantitative assessment of the completeness of a genome assembly in terms of expected gene content. It relies on the analysis of genes that should be present only once in a complete assembly or gene set, while allowing for rare gene duplications or losses ({% cite Simo2015 %}). > Assessing assembly completeness with BUSCO -> +> > **Step 1**: Run {% tool [Busco](toolshed.g2.bx.psu.edu/repos/iuc/busco/busco/5.5.0+galaxy0) %} with the following parameters: > 1. {% icon param-files %} *"Sequences to analyze"*: `Primary contigs FASTA` and `Alternate contigs FASTA` > 2. *"Lineage data source"*: `Use cached lineage data` @@ -932,15 +944,15 @@ The first relevant parameter is the `estimated genome size`. > k = 31 > TESTING set to TRUE > -> property min max -> Homozygous (aa) 99.4165% 99.4241% -> Heterozygous (ab) 0.575891% 0.583546% -> Genome Haploid Length 11,739,321 bp 11,747,160 bp -> Genome Repeat Length 722,921 bp 723,404 bp -> Genome Unique Length 11,016,399 bp 11,023,755 bp -> Model Fit 92.5159% 96.5191% -> Read Error Rate 0.000943206% 0.000943206% ->``` +> property min max +> Homozygous (aa) 99.4165% 99.4241% +> Heterozygous (ab) 0.575891% 0.583546% +> Genome Haploid Length 11,739,321 bp 11,747,160 bp +> Genome Repeat Length 722,921 bp 723,404 bp +> Genome Unique Length 11,016,399 bp 11,023,755 bp +> Model Fit 92.5159% 96.5191% +> Read Error Rate 0.000943206% 0.000943206% +>``` >
>**Step 2**: Copy the number value for the maximum Genome Haploid Length to your clipboard (CTRL + C on Windows; CMD + C on MacOS). > @@ -1037,7 +1049,7 @@ Now let's parse the `transition between haploid & diploid` and `upper bound for An ideal haploid representation would consist of one allelic copy of all heterozygous regions in the two haplomes, as well as all hemizygous regions from both haplomes ({% cite Guan2019 %}). However, in highly heterozygous genomes, assembly algorithms are frequently not able to identify the highly divergent allelic sequences as belonging to the same region, resulting in the assembly of those regions as separate contigs. This can lead to issues in downstream analysis, such as scaffolding, gene annotation and read mapping in general ({% cite Small2007 %}, {% cite Guan2019 %}, {% cite Roach2018 %}). In order to solve this problem, we are going to use purge_dups; this tool will allow us to identify and reassign allelic contigs. -This stage consists of three substages: read-depth analysis, generation of all versus all self-alignment and resolution of haplotigs and overlaps (fig. 8). This is meant to try to resolve the {false duplications} depicted in **Figure 1**. +This stage consists of three substages: read-depth analysis, generation of all versus all self-alignment and resolution of haplotigs and overlaps (fig. 8). This is meant to try to resolve the {false duplications} depicted in **Figure 1**. ![Post-processing with purge_dups](../../images/vgp_assembly/purge_dupspipeline.png "Purge_dups pipeline. Adapted from github.com/dfguan/purge_dups. Purge_dups is integrated in a multi-step pipeline consisting of three main substages. Red indicates the steps which require the use of Minimap2.") @@ -1498,7 +1510,7 @@ YaHS is an open source software that makes use of Hi-C to linearly orient and or > >Optionally considering Hi-C library restriction enzymes, YaHS normalizes contact frequencies by the corresponding number of cutting sites. A scaffolding graph is constructed with contigs as nodes and contig joins as weighted edges. The graph undergoes simplification steps, including edge filtering, tip and blunt end trimming, repeat resolution, transitive edge removal, bubble and ambiguous orientation resolution, weak edge trimming, and ambiguous edge removal. > ->Finally, the graph is traversed to assemble scaffolds along continuous paths. Optionally, a second step of assembly error correction breaks scaffolds at positions lacking sufficient Hi-C coverage. YaHS employs a hierarchical joining process with multiple rounds of scaffolding at decreasing resolutions (increasing chunk sizes), using previous round scaffolds as input. +>Finally, the graph is traversed to assemble scaffolds along continuous paths. Optionally, a second step of assembly error correction breaks scaffolds at positions lacking sufficient Hi-C coverage. YaHS employs a hierarchical joining process with multiple rounds of scaffolding at decreasing resolutions (increasing chunk sizes), using previous round scaffolds as input. >![An overview of YaHS](../../images/vgp_assembly/yahs.png "Schematic of how Hi-C information is used to scaffold and orient contigs along chromosomes using long-range information, as well as link separated haplotype blocks into chromosome-scale haplotypes (from {% cite Zhou2022 %})") > >See {% cite Zhou2022 %} for exact details.
diff --git a/topics/climate/tutorials/fates/slides.html b/topics/climate/tutorials/fates/slides.html index 2796efa65e5fa0..b3e90983dc4ce5 100644 --- a/topics/climate/tutorials/fates/slides.html +++ b/topics/climate/tutorials/fates/slides.html @@ -24,6 +24,17 @@ tutorials: - introduction + +recordings: +- captioners: + - annefou + date: '2021-02-15' + galaxy_version: '21.01' + length: 7M + youtube_id: Z57C7MQG95s + speakers: + - awspolly + --- ## Evolution of land surface models diff --git a/topics/climate/tutorials/fates/tutorial.md b/topics/climate/tutorials/fates/tutorial.md index 5d990fb173d493..594849e61c44ea 100644 --- a/topics/climate/tutorials/fates/tutorial.md +++ b/topics/climate/tutorials/fates/tutorial.md @@ -39,6 +39,17 @@ contributors: - annefou - huitang-earth + +recordings: +- captioners: + - s3by01 + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H20M + youtube_id: 0LsEx11eLiI + speakers: + - annefou + --- diff --git a/topics/climate/tutorials/pangeo-notebook/tutorial.md b/topics/climate/tutorials/pangeo-notebook/tutorial.md index 84e4924d4242b0..9fd1d64bb4b624 100755 --- a/topics/climate/tutorials/pangeo-notebook/tutorial.md +++ b/topics/climate/tutorials/pangeo-notebook/tutorial.md @@ -62,6 +62,17 @@ contributors: notebook: language: python snippet: topics/climate/tutorials/pangeo-notebook/preamble.md + +recordings: +- captioners: + - annefou + date: '2022-03-03' + galaxy_version: '21.09' + length: 17M + youtube_id: XFF2yRcYGp0 + speakers: + - annefou + --- diff --git a/topics/climate/tutorials/pangeo/tutorial.md b/topics/climate/tutorials/pangeo/tutorial.md index bba3eb8c7b82ae..8fa757e43fa911 100755 --- a/topics/climate/tutorials/pangeo/tutorial.md +++ b/topics/climate/tutorials/pangeo/tutorial.md @@ -28,6 +28,17 @@ tags: contributors: - annefou + +recordings: +- captioners: + - annefou + date: '2022-03-03' + galaxy_version: '21.09' + length: 1H20M + youtube_id: V43jEJfa6hY + speakers: + - annefou + --- diff --git a/topics/computational-chemistry/tutorials/htmd-analysis/tutorial.md b/topics/computational-chemistry/tutorials/htmd-analysis/tutorial.md index 69991f6ed29265..9ae3954dd40464 100644 --- a/topics/computational-chemistry/tutorials/htmd-analysis/tutorial.md +++ b/topics/computational-chemistry/tutorials/htmd-analysis/tutorial.md @@ -23,6 +23,18 @@ contributors: - chrisbarnettster - bgruening + +recordings: +- captioners: + - simonbray + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H40M + youtube_id: LjW3nES04bI + speakers: + - simonbray + - chrisbarnettster + --- This tutorial provides an introduction to using high-throughput molecular dynamics to study protein-ligand interaction, as applied to the N-terminal domain of Hsp90 (heat shock protein 90). 
diff --git a/topics/contributing/tutorials/create-new-tutorial-slides/slides.html b/topics/contributing/tutorials/create-new-tutorial-slides/slides.html index e2fcc94f4d71a5..8f4142bf5b48c2 100644 --- a/topics/contributing/tutorials/create-new-tutorial-slides/slides.html +++ b/topics/contributing/tutorials/create-new-tutorial-slides/slides.html @@ -21,6 +21,17 @@ - hexylena - bebatut - shiltemann + +recordings: +- captioners: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 5M + youtube_id: Gm1MKAwuLxg + speakers: + - awspolly + --- # Slides diff --git a/topics/contributing/tutorials/create-new-tutorial/tutorial.md b/topics/contributing/tutorials/create-new-tutorial/tutorial.md index 7e449ac6d61109..ad3c13165a47f5 100644 --- a/topics/contributing/tutorials/create-new-tutorial/tutorial.md +++ b/topics/contributing/tutorials/create-new-tutorial/tutorial.md @@ -20,6 +20,17 @@ contributors: - hexylena - shiltemann - lldelisle + +recordings: +- captioners: + - EngyNasr + date: '2021-02-15' + galaxy_version: '21.01' + length: 42M + youtube_id: Vw9h5kvRH2k + speakers: + - bebatut + --- diff --git a/topics/contributing/tutorials/gitpod/tutorial.md b/topics/contributing/tutorials/gitpod/tutorial.md index ddf9a0e4dcfb7c..cdf9b8ccc33b67 100644 --- a/topics/contributing/tutorials/gitpod/tutorial.md +++ b/topics/contributing/tutorials/gitpod/tutorial.md @@ -13,6 +13,17 @@ key_points: - "GitPod can be used to serve the GTN training materials" contributors: - shiltemann + +recordings: +- captioners: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 20M + youtube_id: _-wnsPttBCo + speakers: + - shiltemann + --- diff --git a/topics/data-science/tutorials/r-advanced/tutorial.md b/topics/data-science/tutorials/r-advanced/tutorial.md index 174d4bcf6b7da0..03cb724322b940 100644 --- a/topics/data-science/tutorials/r-advanced/tutorial.md +++ b/topics/data-science/tutorials/r-advanced/tutorial.md @@ -48,6 +48,18 @@ contributors: - gallantries tags: - R + +recordings: +- captioners: + - ksuderman + - nagoue + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H + youtube_id: _hWBGdlICqg + speakers: + - fpsom + --- With HTS-Seq data analysis, we generated tables containing lists of DE genes, their expression, some statistics, etc. We can manipulate these tables using Galaxy, as we saw in some tutorials, e.g. ["Reference-based RNA-Seq data analysis"]({% link topics/transcriptomics/tutorials/ref-based/tutorial.md %}), and create some visualisations.
diff --git a/topics/data-science/tutorials/r-basics/tutorial.md b/topics/data-science/tutorials/r-basics/tutorial.md index 148233a239dc4f..ed0a3352e19c19 100644 --- a/topics/data-science/tutorials/r-basics/tutorial.md +++ b/topics/data-science/tutorials/r-basics/tutorial.md @@ -42,6 +42,19 @@ contributions: - tobyhodges funding: - gallantries + +recordings: +- captioners: + - s3by01 + - annefou + - kxk302 + date: '2021-02-15' + galaxy_version: '21.01' + length: 50M + youtube_id: MKkQDh5Nguc + speakers: + - fpsom + --- This tutorial will introduce R basics, using an RStudio Interactive Tool in Galaxy diff --git a/topics/dev/tutorials/architecture/slides.html b/topics/dev/tutorials/architecture/slides.html index 07b4d9e777f950..392521be9e9590 100644 --- a/topics/dev/tutorials/architecture/slides.html +++ b/topics/dev/tutorials/architecture/slides.html @@ -22,6 +22,7 @@ contributors: - jmchilton - bgruening + --- layout: true name: left-aligned @@ -526,7 +527,7 @@ This linting process captures many common problems as well as enforcing a common code style. The output reports from the CI process are relatively straightforward, but it is a good process -to lint changes and formatting your code before opening pull requests for them. +to lint changes and format your code before opening pull requests for them. Execute the `format` make command to format the backend code and then use `tox` to lint the formatted code. diff --git a/topics/dev/tutorials/debugging/tutorial.md b/topics/dev/tutorials/debugging/tutorial.md index 3e1789c7a8623b..cfedc41a12faed 100644 --- a/topics/dev/tutorials/debugging/tutorial.md +++ b/topics/dev/tutorials/debugging/tutorial.md @@ -37,6 +37,7 @@ requirements: tutorials: - architecture subtopic: core + --- In this tutorial we will demonstrate how to find and fix common types of bugs you may encounter as a contributor to Galaxy. We will step you through the process of finding and fixing a bug - from locating specific errors in the logs of Galaxy's GitHub Actions, to identifying their cause, developing a solution and committing your edits @@ -390,7 +391,7 @@ Client tests are tests written by developers to test front-end code. In the case > Fixing a failing client unit test > > 1. **Finding the failing test on GitHub** -> In this section we, again, can't demonstrate the remote output, so we descibe it instead. +> In this section we, again, can't demonstrate the remote output, so we describe it instead. > One of the failing tests on GitHub says "Client Unit Testing / jest". Clicking on Details beside that failure will open up the terminal output from that test. Here you should be able to see what test is failing.
> > > diff --git a/topics/dev/tutorials/tool-from-scratch/tutorial.md b/topics/dev/tutorials/tool-from-scratch/tutorial.md index e5891e96a44769..3bbcd56589aac7 100644 --- a/topics/dev/tutorials/tool-from-scratch/tutorial.md +++ b/topics/dev/tutorials/tool-from-scratch/tutorial.md @@ -25,6 +25,39 @@ contributors: - davebx - bernt-matthias + +recordings: +- captioners: + - gallardoalba + - astrovsky01 + date: '2023-05-18' + galaxy_version: '22.01' + length: 50M + youtube_id: QRoY1pFUY30 + speakers: + - gallardoalba + - astrovsky01 +- captioners: + - gallardoalba + date: '2022-03-11' + galaxy_version: '22.01' + length: 40M + youtube_id: 33L4B9ir0aQ + speakers: + - gallardoalba +- captioners: + - davebx + - astrovsky01 + - bernt-matthias + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H40M + youtube_id: videoseries?list=PLNFLKDpdM3B9GhA0FryVV2YX3YL35rENK + speakers: + - davebx + - astrovsky01 + - bernt-matthias + --- diff --git a/topics/dev/tutorials/tool-generators-advanced/tutorial.md b/topics/dev/tutorials/tool-generators-advanced/tutorial.md index 78b62885bd1f1a..721f92c5915d8b 100644 --- a/topics/dev/tutorials/tool-generators-advanced/tutorial.md +++ b/topics/dev/tutorials/tool-generators-advanced/tutorial.md @@ -38,6 +38,17 @@ requirements: contributors: - fubar2 + +recordings: +- captioners: + - fubar2 + date: '2021-02-15' + galaxy_version: '21.01' + length: 7M + youtube_id: DK1eKz5TRs4 + speakers: + - fubar2 + --- > diff --git a/topics/dev/tutorials/tool-generators/tutorial.md b/topics/dev/tutorials/tool-generators/tutorial.md index ec4c250ee40ac9..44dffda762bcde 100644 --- a/topics/dev/tutorials/tool-generators/tutorial.md +++ b/topics/dev/tutorials/tool-generators/tutorial.md @@ -48,6 +48,17 @@ follow_up_training: contributors: - fubar2 - hexylena + +recordings: +- captioners: + - fubar2 + date: '2021-02-15' + galaxy_version: '21.01' + length: 30M + youtube_id: 8nKBfVM6240 + speakers: + - fubar2 + --- The Toolfactory and these tutorials are for developers and researchers learning about Galaxy, who routinely develop their own analysis scripts using diff --git a/topics/ecology/tutorials/Obitools-metabarcoding/tutorial.md b/topics/ecology/tutorials/Obitools-metabarcoding/tutorial.md index c2069621f79121..b3bca0b6d33433 100644 --- a/topics/ecology/tutorials/Obitools-metabarcoding/tutorial.md +++ b/topics/ecology/tutorials/Obitools-metabarcoding/tutorial.md @@ -29,6 +29,17 @@ contributions: - gallantries - pndb + +recordings: +- captioners: + - yvanlebras + date: '2022-03-03' + galaxy_version: '21.09' + length: 1H15M + youtube_id: o2cUvb_lmLs + speakers: + - yvanlebras + --- diff --git a/topics/ecology/tutorials/PAMPA-toolsuite-tutorial/tutorial.md b/topics/ecology/tutorials/PAMPA-toolsuite-tutorial/tutorial.md index 10c62509f6f657..d2c37c7bd0f8fd 100644 --- a/topics/ecology/tutorials/PAMPA-toolsuite-tutorial/tutorial.md +++ b/topics/ecology/tutorials/PAMPA-toolsuite-tutorial/tutorial.md @@ -38,6 +38,17 @@ contributions: - gallantries - pndb + +recordings: +- captioners: + - colineroyaux + date: '2022-03-03' + galaxy_version: '21.09' + length: 2H + youtube_id: leMPC2Ckehc + speakers: + - colineroyaux + --- diff --git a/topics/ecology/tutorials/biodiversity-data-exploration/tutorial.md b/topics/ecology/tutorials/biodiversity-data-exploration/tutorial.md index 0e2583c7bcd80d..b9e7e437fa9349 100644 --- a/topics/ecology/tutorials/biodiversity-data-exploration/tutorial.md +++ b/topics/ecology/tutorials/biodiversity-data-exploration/tutorial.md @@ -28,6 +28,17 @@ contributions: - pndb 
- fnso2019 + +recordings: +- captioners: + - onorvez + date: '2022-03-03' + galaxy_version: '21.09' + length: 1H + youtube_id: rWfPr-5F3GQ + speakers: + - onorvez + --- diff --git a/topics/epigenetics/tutorials/atac-seq/slides.html b/topics/epigenetics/tutorials/atac-seq/slides.html index d1ff8d72d98cd3..cd497d4ef7983e 100644 --- a/topics/epigenetics/tutorials/atac-seq/slides.html +++ b/topics/epigenetics/tutorials/atac-seq/slides.html @@ -18,6 +18,17 @@ - heylf - lldelisle + +recordings: +- captioners: + - lldelisle + date: '2021-02-15' + galaxy_version: '21.01' + length: 15M + youtube_id: 2jPNNd5YJNM + speakers: + - lldelisle + --- ### Where does my data come from? diff --git a/topics/epigenetics/tutorials/atac-seq/tutorial.md b/topics/epigenetics/tutorials/atac-seq/tutorial.md index 4a8b04d5f4afab..ee70e55952bc53 100644 --- a/topics/epigenetics/tutorials/atac-seq/tutorial.md +++ b/topics/epigenetics/tutorials/atac-seq/tutorial.md @@ -20,6 +20,17 @@ contributors: - mblue9 - heylf + +recordings: +- captioners: + - hrhotz + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H50M + youtube_id: Zc-bl6ZfcX4 + speakers: + - heylf + --- diff --git a/topics/fair/tutorials/ro-crate-intro/tutorial.md b/topics/fair/tutorials/ro-crate-intro/tutorial.md index b8c85bdf97c061..54b231c207bf77 100644 --- a/topics/fair/tutorials/ro-crate-intro/tutorial.md +++ b/topics/fair/tutorials/ro-crate-intro/tutorial.md @@ -53,6 +53,16 @@ abbreviations: JSON: JavaScript Object Notation, a generic structured text-based data format JSON-LD: JSON Linked Data, a way to express Linked Data (RDF) using regular JSON RO-Crate: Research Object Crate; a way to package research data with structured FAIR metadata + +recordings: +- captioners: + - stain + date: '2023-04-14' + length: 17M + youtube_id: 5GYdN5B1tc8 + speakers: + - stain + --- This tutorial assumes you have already completed [An overview of the RO-Crate concept and its implementations](https://gallantries.github.io/video-library/videos/ro-crates/intro/slides/) and have a basic understanding of working with JSON. @@ -73,7 +83,7 @@ In this tutorial, meant to be read along with the [RO-Crate specification](https ## Making a folder into an RO-Crate -In the simplest form, to describe some data on disk, an _RO-Crate Metadata File_ is placed in a folder alongside a set of files or folders. +In the simplest form, to describe some data on disk, an _RO-Crate Metadata File_ is placed in a folder alongside a set of files or folders. First create a new folder `crate1/` and add a single file `data.csv` to represent our dataset: @@ -110,10 +120,10 @@ The preamble of `@context` and `@graph` are JSON-LD structures that help provide However, in the general case it should be sufficient to follow the RO-Crate JSON examples directly without deeper JSON-LD understanding. The RO-Crate Metadata Document contains a flat list of _entities_ as JSON objects in the `@graph` array. These entities are cross-referenced using `@id` identifiers, rather than being deeply nested. This is one major difference from JSON structures you may have experienced before. The `@type` keyword associates an object with a predefined type from the JSON-LD context. Almost any property can alternatively be used with an `[]` array to provide multiple values. -The rest of this tutorial, and indeed most of the [RO-Crate specification](https://www.researchobject.org/ro-crate/1.1/), specify which entities can be added to the `@graph` array. 
+The rest of this tutorial, and indeed most of the [RO-Crate specification](https://www.researchobject.org/ro-crate/1.1/), specify which entities can be added to the `@graph` array. -## RO-Crate Metadata descriptor +## RO-Crate Metadata descriptor The first JSON-LD _entity_ to add has the `@id` value of `ro-crate-metadata.json` to describe the JSON file itself: @@ -144,7 +154,7 @@ Next we'll add another entity to the `@graph` array, to describe the [RO-Crate R { "@id": "./", "@type": "Dataset", - "hasPart": [ + "hasPart": [ ] } @@ -161,9 +171,9 @@ This example is a folder-based RO-Crate stored on disk, and therefore absolute p When describing the [root entity](https://www.researchobject.org/ro-crate/1.1/root-data-entity.html#direct-properties-of-the-root-data-entity), the properties generally apply to the whole of the crate. For instance it is a good idea to give a description of why these resources are gathered in a crate, as well as giving the crate a name and license for FAIR reuse and citation. > -> +> > Try to add the `name`, `description` and `datePublished` properties, and for `license` as a cross-reference, use [SPDX](https://spdx.org/licenses/) license list to find the identifier for Creative Commons Zero or another license of your choice: -> +> > > > > ```json > > { @@ -173,7 +183,7 @@ When describing the [root entity](https://www.researchobject.org/ro-crate/1.1/ro > > "name": "Example crate", > > "description": "I created this example by following the tutorial", > > "datePublished": "2023-05-22T12:03:00+0100", -> > "license": { "@id": "http://spdx.org/licenses/CC0-1.0"} +> > "license": { "@id": "http://spdx.org/licenses/CC0-1.0"} > > } > > ``` > {: .solution} @@ -181,11 +191,11 @@ When describing the [root entity](https://www.researchobject.org/ro-crate/1.1/ro > License identifiers -> In the above solution, the identifier for CC0-1.0 is slightly -> different from their listed web page URI -- the former +> In the above solution, the identifier for CC0-1.0 is slightly +> different from their listed web page URI -- the former > is chosen to align with [SPDX JSON-LD identifiers](https://github.com/spdx/license-list-data/tree/main/jsonld), -> which unfortunately are not shown directly on their website as _permalinks_. -> It is not a requirement in RO-Crate to use permalinks for `@id` of entities like licenses, +> which unfortunately are not shown directly on their website as _permalinks_. +> It is not a requirement in RO-Crate to use permalinks for `@id` of entities like licenses, > it is nevertheless best practice to propagate permalinks where known. {: .tip} @@ -197,23 +207,23 @@ When describing the [root entity](https://www.researchobject.org/ro-crate/1.1/ro ## About cross-references In a RO-Crate Metadata Document, entities are cross-referenced using `@id` reference objects, rather than using deeply nested JSON objects. -In short, this _flattened JSON-LD_ style (shown below) allows any entity to reference any other entity, and RO-Crate consumers can directly find all the descriptions of a given entity as a single JSON object. +In short, this _flattened JSON-LD_ style (shown below) allows any entity to reference any other entity, and RO-Crate consumers can directly find all the descriptions of a given entity as a single JSON object. ![JSON block with id `ro-crate-metadata.json` has some attributes, `conformsTo` RO-Crate 1.2, and `about` referencing id `./`. 
In the second JSON block with id ./ we see additional attributes such as its name and description.](../../images/ro-crate-intro/introduction-figure-1.svg "showing RO-Crate Metadata descriptor's about property pointing at the RO-Crate Root entity with matching @id") > -> +> > Consider the root Data Entity `./`, and add such a cross-reference to the file `data.csv` using the _property_ called `hasPart`: -> +> > > > > ```json > > { > > "@id": "./", > > "@type": "Dataset", -> > "hasPart": [ -> > {"@id": "data.csv"} +> > "hasPart": [ +> > {"@id": "data.csv"} > > ], > > "…": "…" > > } @@ -228,14 +238,14 @@ The RO-Crate root is always typed `Dataset`, though `@type` may in some cases ha > 1. Navigate the schema.org type list to find a subtype of `CreativeWork` that is suitable for a learning resource. > 2. Modify the root entity's `@type` to be an array. > 3. Add the type name for learning resource at the end of the array. -> +> > > > > ```json > > { > > "@id": "./", > > "@type": ["Dataset", "LearningResource"], -> > "hasPart": [ -> > {"@id": "data.csv"} +> > "hasPart": [ +> > {"@id": "data.csv"} > > ], > > "…": "…" > > } @@ -243,28 +253,28 @@ The RO-Crate root is always typed `Dataset`, though `@type` may in some cases ha > {: .solution} {: .question} -The root has several metadata properties that describe the RO-Crate as a whole, considering it as a Research Object of collected resources. The section on [root data entity](https://www.researchobject.org/ro-crate/1.1/root-data-entity.html) details further the required and recommended properties of the root `./`. +The root has several metadata properties that describe the RO-Crate as a whole, considering it as a Research Object of collected resources. The section on [root data entity](https://www.researchobject.org/ro-crate/1.1/root-data-entity.html) details further the required and recommended properties of the root `./`. ## Data entities -A main type of resources collected in a Research Object is _data_ -- simplifying, we can consider data as any kind of file that can be opened in other programs. These are aggregated by the Root Dataset with the `hasPart` property. In this example we have an array with a single value, a reference to the entity describing the file `data.csv`. +A main type of resources collected in a Research Object is _data_ -- simplifying, we can consider data as any kind of file that can be opened in other programs. These are aggregated by the Root Dataset with the `hasPart` property. In this example we have an array with a single value, a reference to the entity describing the file `data.csv`. > Referencing external resources > RO-Crates can also contain data entities that are folders and Web resources, as well as non-File data like online databases -- see section on [data entities](https://www.researchobject.org/ro-crate/1.1/data-entities.html). {: .tip} -We should now be able to follow the `@id` reference for the corresponding _data entity_ JSON block for our CSV file, which we need to add to the `@graph` of the RO-Crate Metadata Document. +We should now be able to follow the `@id` reference for the corresponding _data entity_ JSON block for our CSV file, which we need to add to the `@graph` of the RO-Crate Metadata Document. > -> -> 1. Add a declaration for the CSV file as new entity with `@type` declared as `File`. -> 2. Give the file a human-readable `name` and `description` to detail it as _Rainfall data for Katoomba in NSW Australia, captured February 2022_. -> 3.
To add this is a CSV file, declare the `encodingFormat` as the appropriate [IANA media type](https://www.iana.org/assignments/media-types/#text) string. -> +> +> 1. Add a declaration for the CSV file as a new entity with `@type` declared as `File`. +> 2. Give the file a human-readable `name` and `description` to detail it as _Rainfall data for Katoomba in NSW Australia, captured February 2022_. +> 3. To indicate that this is a CSV file, declare the `encodingFormat` as the appropriate [IANA media type](https://www.iana.org/assignments/media-types/#text) string. +> > > > > ```json > > { @@ -273,9 +283,9 @@ We should now be able to follow the `@id` reference for the corresponding _data > > "name": "Rainfall Katoomba 2022-02", > > "description": "Rainfall data for Katoomba, NSW Australia February 2022", > > "encodingFormat": "text/csv" -> > }, +> > }, > > ``` -> > +> > > {: .solution} {: .question} @@ -285,10 +295,10 @@ It is recommended that every entity has a human-readable `name`; as shown in the For more information on describing files and folders, including their recommended and required attributes, see section on [data entities](https://www.researchobject.org/ro-crate/1.1/data-entities.html). > -> +> > 1. Consider if the file content of `data.csv` is not covered by our overall license (CC0), but [Creative Commons BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) (which only permits non-commercial use) > 2. To override, add a `license` cross-reference property on this particular data entity -> +> > > > > ```json > > { @@ -298,7 +308,7 @@ For more information on describing files and folders, including their recommende > > "description": "Rainfall data for Katoomba, NSW Australia February 2022", > > "encodingFormat": "text/csv", > > "license": { "@id": "https://creativecommons.org/licenses/by-nc-sa/4.0/" } -> > }, +> > }, > > ``` > {: .solution} {: .question} @@ -320,9 +330,9 @@ You may notice the subtle difference between a _data entity_ that is conceptuall We have previously declared two different `license` cross-references. While following the URLs in this case explains the licenses well, it is also best practice to include a very brief summary of contextual entities in the RO-Crate Metadata Document. This is more important if the cross-reference does not use a permalink and may change over time. As a minimum, each referenced entity should have a `@type` and `name` property. It is also possible to add `url` for more information. > -> +> > Add a contextual entity for each of the two licenses; see the [licensing](https://www.researchobject.org/ro-crate/1.1/contextual-entities.html#licensing-access-control-and-copyright) section for details: -> +> > > > > ```json > > { @@ -330,14 +340,14 @@ We have previously declared two different `license` cross-references. While foll > > "@type": "CreativeWork", > > "name": "CC BY-NC-SA 4.0 International", > > "description": "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International" -> > }, +> > }, > > { > > "@id": "http://spdx.org/licenses/CC0-1.0", > > "@type": "CreativeWork", > > "name": "CC0-1.0", > > "description": "Creative Commons Zero v1.0 Universal", > > "url": "https://creativecommons.org/publicdomain/zero/1.0/" -> > }, +> > }, > > ``` > {: .solution} {: .question} @@ -351,12 +361,12 @@ An additional exercise is to try to unify the two entities so that both use spdx Moving back to the RO-Crate root `./`, let's specify who the authors of the crate are. > -> +> > 1.
Add yourself as an [`author`](https://www.researchobject.org/ro-crate/1.1/contextual-entities.html#people) of the crate using the type `Person` -> 2. Include your preferred name. +> 2. Include your preferred name. > 3. If you don't have an [ORCID](https://orcid.org/), you may use either the URL of your main home page at your institution, or a crate-local identifier like `#alice`. > 4. Include your `affiliation` as a string value. -> +> > > > > ```json > > { @@ -367,7 +377,7 @@ Moving back to the RO-Crate root `./`, let's specify who are the authors of the > > }, > > { > > "@id": "https://orcid.org/0000-0002-1825-0097", -> > "@type": "Person", +> > "@type": "Person", > > "name": "Josiah Carberry", > > "affiliation": "Brown University" > > } @@ -381,15 +391,15 @@ Moving back to the RO-Crate root `./`, let's specify who are the authors of the {: .tip} > -> -> 1. "Unroll" your `affiliation` of the person as cross-reference to another contextual entity, typed as an `Organization`. +> +> 1. "Unroll" your `affiliation` of the person as a cross-reference to another contextual entity, typed as an `Organization`. > 2. You can use [ROR](https://ror.org/) to find an identifier for most educational/research institutions, or you can use the main web page of your organization as its `@id`. -> +> > > > > ```json > > { > > "@id": "https://orcid.org/0000-0002-1825-0097", -> > "@type": "Person", +> > "@type": "Person", > > "name": "Josiah Carberry" > > }, > > { @@ -407,10 +417,10 @@ Moving back to the RO-Crate root `./`, let's specify who are the authors of the The reuse of existing identifiers is important for both persons and organizations from a FAIR perspective, as their names may not be globally unique. > -> -> 1. Now imagine you are going to publish the RO-Crate on your institution's web pages. +> +> 1. Now imagine you are going to publish the RO-Crate on your institution's web pages. > 2. Cross-reference the same Organization entity with `publisher` from the RO-Crate Root entity: -> +> > > > > ```json > > { @@ -429,7 +439,7 @@ The reuse of existing identifiers is important for both persons and organization As we made this RO-Crate Metadata File by hand, it's good to check for any JSON errors, such as missing/extra `,` or unclosed `"` quotes. Try pasting the file content into the [JSON-LD Playground](https://json-ld.org/playground/). It should show any errors, for example: ``` -JSON markup - SyntaxError: JSON.parse: expected `','` or `']'` after array element +JSON markup - SyntaxError: JSON.parse: expected `','` or `']'` after array element at line 29 column 5 of the JSON data ``` @@ -470,7 +480,7 @@ This verbose listing of the JSON-LD shows how the `@context` has correctly expan ![Visualized in the JSON-LD Playground](../../images/ro-crate-intro/jsonld-playground-visualized.png) As the RO-Crate Metadata Document is valid JSON-LD it is also possible to process it using Linked Data technologies such as triple stores and SPARQL queries. It is beyond the scope of this tutorial to explain this aspect fully, but interested readers should consider how to [handle relative URI references](https://www.researchobject.org/ro-crate/1.1/appendix/relative-uris.html). -As an example, try the _Table_ button and notice that the entities with relative identifiers are not included. This is because when converting to RDF you need absolute URIs which do not readily exist when a crate is stored on disk, we've not decided where the crate is to be published yet.
+As an example, try the _Table_ button and notice that the entities with relative identifiers are not included. This is because when converting to RDF you need absolute URIs, which do not readily exist when a crate is stored on disk, as we've not decided where the crate is to be published yet. ## Advanced: Converting JSON-LD to triples @@ -493,23 +503,23 @@ Above `arcp://uuid,deffa754-c764-4e04-aabf-e600c6200553/` is a randomly generate ## HTML preview -An RO-Crate can be distributed on disk, in a packaged format such as a zip file or disk image, or placed on a static website. In any of these cases, an RO-Crate can have an accompanying HTML version (`ro-crate-metadata.html`) designed to be human-readable. +An RO-Crate can be distributed on disk, in a packaged format such as a zip file or disk image, or placed on a static website. In any of these cases, an RO-Crate can have an accompanying HTML version (`ro-crate-metadata.html`) designed to be human-readable. ![Example dataset for RO-Crate specification](../../images/ro-crate-intro/ro-crate-preview-example.png) > -> +> > Try navigating the [preview of the running example](rainfall-1.2.1/ro-crate-preview.html) and find: > > 1. What is the license of the rainfall CSV? > 2. What is the affiliation of the crate's author? > 3. What does the Validity Check inspect? > 4. What is not covered by this check? -> +> > > > > 1. CC BY-NC-SA 4.0 International > > 2. Brown University -> > 3. The context, and for root dataset: existance, valid identifier, name, description, license and date published. +> > 3. The context, and for the root dataset: existence, valid identifier, name, description, license and date published. > > 4. The other entities were not checked, e.g. the `affiliation` of the author. > {: .solution} {: .question} @@ -542,9 +552,9 @@ You have completed making a basic RO-Crate. You may try any of the following: - Try briefly describing [provenance](https://www.researchobject.org/ro-crate/1.1/provenance.html) or [software](https://www.researchobject.org/ro-crate/1.1/workflows.html) for any additional data entities you have added. > Complete RO-Crate Metadata Document -> +> > The final RO-Crate Metadata Document constructed in this tutorial should look something like: -> +> > > > > ```json > > { @@ -582,17 +592,17 @@ You have completed making a basic RO-Crate. You may try any of the following: > > "@type": "CreativeWork", > > "name": "CC BY-NC-SA 4.0 International", > > "description": "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International" -> > }, +> > }, > > { > > "@id": "http://spdx.org/licenses/CC0-1.0", > > "@type": "CreativeWork", > > "name": "CC0-1.0", > > "description": "Creative Commons Zero v1.0 Universal", > > "url": "https://creativecommons.org/publicdomain/zero/1.0/" -> > }, +> > }, > > { > > "@id": "https://orcid.org/0000-0002-1825-0097", -> > "@type": "Person", +> > "@type": "Person", > > "name": "Josiah Carberry", > > "affiliation": { > > "@id": "https://ror.org/05gq02987" @@ -620,7 +630,7 @@ Licensed under the Apache License, Version 2.0 (the “License”); you may not http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ## Changes diff --git a/topics/galaxy-interface/tutorials/collections/tutorial.md b/topics/galaxy-interface/tutorials/collections/tutorial.md index 7237ef4a561789..552daa1e0b61e0 100644 --- a/topics/galaxy-interface/tutorials/collections/tutorial.md +++ b/topics/galaxy-interface/tutorials/collections/tutorial.md @@ -22,6 +22,24 @@ contributors: - nekrut subtopic: manage + +recordings: +- youtube_id: uN6nP3I7QLE + captioners: + - delphine-l + date: '2023-05-09' + length: 13M + speakers: + - delphine-l +- captioners: + - annefou + - mariipia10 + date: '2021-08-09' + length: 11M + youtube_id: uZUt9XIHUQo + speakers: + - nekrut + --- {% snippet faqs/gtn/galaxy_tested_with.md version="22.01" %} diff --git a/topics/galaxy-interface/tutorials/history/tutorial.md b/topics/galaxy-interface/tutorials/history/tutorial.md index 3c8662c2884e28..00cced02e613b5 100644 --- a/topics/galaxy-interface/tutorials/history/tutorial.md +++ b/topics/galaxy-interface/tutorials/history/tutorial.md @@ -30,6 +30,17 @@ contributions: - bgruening - hexylena subtopic: histories + +recordings: +- youtube_id: C3_HSgbbDWc + speakers: + - shiltemann + captioners: + - shiltemann + length: 18M + date: '2023-05-19' + galaxy_version: '23.01' + --- {% snippet faqs/gtn/galaxy_tested_with.md version="23.0" %} diff --git a/topics/galaxy-interface/tutorials/ncbi-sarf/slides.html b/topics/galaxy-interface/tutorials/ncbi-sarf/slides.html index ee58c9e0ab65b2..9450f24ed3ae26 100644 --- a/topics/galaxy-interface/tutorials/ncbi-sarf/slides.html +++ b/topics/galaxy-interface/tutorials/ncbi-sarf/slides.html @@ -18,6 +18,17 @@ subtopic: upload contributors: - jontrow + +recordings: +- captioners: + - prodromus + date: '2021-05-15' + galaxy_version: '21.01' + length: 15M + youtube_id: siLP71B9gm4 + speakers: + - prodromus + --- # Why have SRA data in the cloud? diff --git a/topics/galaxy-interface/tutorials/ncbi-sarf/tutorial.md b/topics/galaxy-interface/tutorials/ncbi-sarf/tutorial.md index d4a772eb1ba221..c632af04938ec0 100644 --- a/topics/galaxy-interface/tutorials/ncbi-sarf/tutorial.md +++ b/topics/galaxy-interface/tutorials/ncbi-sarf/tutorial.md @@ -26,6 +26,17 @@ requirements: topic_name: galaxy-interface tutorials: - upload-rules + +recordings: +- captioners: + - prodromus + date: '2021-05-15' + galaxy_version: '21.01' + length: 40M + youtube_id: ogu-NBTP-DM + speakers: + - prodromus + --- Traditionally, after a list of run accessions has been filtered on the NCBI website, the accessions are used to download and extract fastq using the SRA toolkit to enter into the next steps of the workflow. A newer compressed data type, generated from raw submitted data containing SARS-CoV-2 sequence, is also accessible to Galaxy users from SRA in the Cloud. 
diff --git a/topics/galaxy-interface/tutorials/upload-rules-advanced/tutorial.md b/topics/galaxy-interface/tutorials/upload-rules-advanced/tutorial.md index ef93d212e42da3..2d34acd1593944 100644 --- a/topics/galaxy-interface/tutorials/upload-rules-advanced/tutorial.md +++ b/topics/galaxy-interface/tutorials/upload-rules-advanced/tutorial.md @@ -24,6 +24,18 @@ contributors: - jmchilton - hexylena subtopic: upload + +recordings: +- captioners: + - assuntad23 + date: '2021-02-15' + galaxy_version: '21.01' + length: 22M + youtube_id: 1MWHVRWXpyA + speakers: + - assuntad23 + + --- diff --git a/topics/galaxy-interface/tutorials/upload-rules/tutorial.md b/topics/galaxy-interface/tutorials/upload-rules/tutorial.md index 2938f47d428e88..1389654e3dac2f 100644 --- a/topics/galaxy-interface/tutorials/upload-rules/tutorial.md +++ b/topics/galaxy-interface/tutorials/upload-rules/tutorial.md @@ -31,6 +31,17 @@ contributors: - jmchilton - hexylena subtopic: upload + +recordings: +- captioners: + - assuntad23 + date: '2021-02-15' + galaxy_version: '21.01' + length: 22M + youtube_id: 1MWHVRWXpyA + speakers: + - assuntad23 + --- diff --git a/topics/galaxy-interface/tutorials/workflow-automation/tutorial.md b/topics/galaxy-interface/tutorials/workflow-automation/tutorial.md index adc44b63d2e302..4f11b27b6d6021 100644 --- a/topics/galaxy-interface/tutorials/workflow-automation/tutorial.md +++ b/topics/galaxy-interface/tutorials/workflow-automation/tutorial.md @@ -28,6 +28,16 @@ contributors: - simonbray - wm75 subtopic: workflows + +recordings: +- captioners: + - simonbray + date: '2021-02-15' + galaxy_version: '21.01' + length: 30M + youtube_id: o39QjVnLG68 + speakers: + - simonbray --- diff --git a/topics/galaxy-interface/tutorials/workflow-parameters/tutorial.md b/topics/galaxy-interface/tutorials/workflow-parameters/tutorial.md index 0ba00881321158..1fdc4a0d3392c4 100644 --- a/topics/galaxy-interface/tutorials/workflow-parameters/tutorial.md +++ b/topics/galaxy-interface/tutorials/workflow-parameters/tutorial.md @@ -21,6 +21,16 @@ contributors: - hexylena level: Intermediate subtopic: workflows + +recordings: +- captioners: + - mvdbeek + date: '2022-07-06' + length: 30M + youtube_id: otvEuDlCye0 + speakers: + - mvdbeek + --- diff --git a/topics/galaxy-interface/tutorials/workflow-reports/tutorial.md b/topics/galaxy-interface/tutorials/workflow-reports/tutorial.md index b55351b4b41bad..536eb0f3e5307d 100644 --- a/topics/galaxy-interface/tutorials/workflow-reports/tutorial.md +++ b/topics/galaxy-interface/tutorials/workflow-reports/tutorial.md @@ -33,6 +33,16 @@ contributions: - gallantries level: Intermediate subtopic: workflows + +recordings: +- captioners: + - shiltemann + date: '2022-07-06' + length: 30M + youtube_id: hrZhpsiGaPM + speakers: + - shiltemann + --- diff --git a/topics/genome-annotation/tutorials/annotation-with-prokka/slides.html b/topics/genome-annotation/tutorials/annotation-with-prokka/slides.html index 01b0eb5c84df7e..de8e49ac338555 100644 --- a/topics/genome-annotation/tutorials/annotation-with-prokka/slides.html +++ b/topics/genome-annotation/tutorials/annotation-with-prokka/slides.html @@ -22,6 +22,17 @@ - annasyme - tseemann - slugger70 + +recordings: +- captioners: + - abretaud + date: '2021-02-15' + galaxy_version: '21.01' + length: 3M + youtube_id: eqHy-Cc7k4Q + speakers: + - awspolly + --- 2 (distance = 1.681) -> +> > -------------------------- > ``` {: .code-out} - -The classical {UEP} method scans the electrostatic potential looking for minima in it, which -are associated to 
potential muon stopping sites. This method further classifies those minima -by estimating their "attractor size". If you look at the picture in the figure below, the two -minima have similar values, but the capacity to "attract" a muon is greater for the minima on the left, -which makes it more likely to be a muon stopping site. The probability of a potential stopping site is + +The classical {UEP} method scans the electrostatic potential looking for minima in it, which +are associated with potential muon stopping sites. This method further classifies those minima +by estimating their "attractor size". If you look at the picture in the figure below, the two +minima have similar values, but the capacity to "attract" a muon is greater for the minimum on the left, +which makes it more likely to be a muon stopping site. The probability of a potential stopping site is given by the size of the cluster that forms around its associated minimum. ![Graphical illustration of how muons may be clustered in two different potential minima that have similar energies with a simple 1D plot, and two real examples (CaF and YF3) showing the potential energy in a 3D structure.](../../images/theoretical_basis_of_our_variant_of_the_UEP_method.png) diff --git a/topics/microbiome/tutorials/beer-data-analysis/tutorial.md b/topics/microbiome/tutorials/beer-data-analysis/tutorial.md index 1fc5db1ef21a00..651349a30989ad 100644 --- a/topics/microbiome/tutorials/beer-data-analysis/tutorial.md +++ b/topics/microbiome/tutorials/beer-data-analysis/tutorial.md @@ -40,6 +40,17 @@ edam_ontology: - topic_3697 # Microbial ecology - topic_0637 # Taxonomy - topic_0080 # Sequence analysis + +recordings: +- youtube_id: PJeS06yiJUM + speakers: + - bebatut + captioners: + - sophia120199 + date: '2023-05-10' + galaxy_version: '23.01' + length: 1H5M + --- diff --git a/topics/microbiome/tutorials/introduction/slides.html b/topics/microbiome/tutorials/introduction/slides.html index 82cfe3d3095020..d7c720e296c7cb 100644 --- a/topics/microbiome/tutorials/introduction/slides.html +++ b/topics/microbiome/tutorials/introduction/slides.html @@ -10,6 +10,17 @@ contributors: - bebatut - shiltemann + +recordings: +- captioners: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 15M + youtube_id: 9OY1mklWuK0 + speakers: + - shiltemann + --- ## Microbiome Analysis diff --git a/topics/microbiome/tutorials/metagenomics-assembly/tutorial.md b/topics/microbiome/tutorials/metagenomics-assembly/tutorial.md index aefbd8d0df2b9d..184415b77c5f90 100644 --- a/topics/microbiome/tutorials/metagenomics-assembly/tutorial.md +++ b/topics/microbiome/tutorials/metagenomics-assembly/tutorial.md @@ -42,6 +42,18 @@ tags: - microgalaxy redirect_from: - /topics/metagenomics/tutorials/metagenomics-assembly/tutorial + +recordings: +- youtube_id: 1ZYGy85Im7w + date: '2023-05-02' + speakers: + - bebatut + galaxy_version: '23.01' + length: 1H + captioners: + - bebatut + - sophia120199 + --- diff --git a/topics/microbiome/tutorials/metatranscriptomics-short/tutorial.md b/topics/microbiome/tutorials/metatranscriptomics-short/tutorial.md index e53875a80df35b..efda88c41a2760 100644 --- a/topics/microbiome/tutorials/metatranscriptomics-short/tutorial.md +++ b/topics/microbiome/tutorials/metatranscriptomics-short/tutorial.md @@ -41,6 +41,29 @@ edam_ontology: - topic_0637 # Taxonomy - topic_1775 # Function analysis - topic_0080 # Sequence analysis + +recordings: +- youtube_id: HNYop3vLpoM + date: '2023-05-17' + speakers: + - paulzierep + captioners: + - paulzierep + 
length: 1H5M + galaxy_version: '23.01' +- captioners: + - EngyNasr + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H30M + youtube_id: EMaos5u1_a8 + speakers: + - pratikdjagtap + - timothygriffin + - subinamehta + - shiltemann + --- diff --git a/topics/microbiome/tutorials/metatranscriptomics/tutorial.md b/topics/microbiome/tutorials/metatranscriptomics/tutorial.md index 5e2d3e99f2b572..101aa3090e4c4a 100644 --- a/topics/microbiome/tutorials/metatranscriptomics/tutorial.md +++ b/topics/microbiome/tutorials/metatranscriptomics/tutorial.md @@ -43,6 +43,30 @@ edam_ontology: - topic_0637 # Taxonomy - topic_1775 # Function analysis - topic_0080 # Sequence analysis + +recordings: +- youtube_id: HNYop3vLpoM + date: '2023-05-17' + speakers: + - paulzierep + captioners: + - paulzierep + length: 1H5M + galaxy_version: '23.01' +- captioners: + - EngyNasr + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H30M + youtube_id: EMaos5u1_a8 + speakers: + - pratikdjagtap + - timothygriffin + - subinamehta + - shiltemann + + --- {% include topics/microbiome/tutorials/metatranscriptomics/content.md short=false %} diff --git a/topics/microbiome/tutorials/mothur-miseq-sop-short/tutorial.md b/topics/microbiome/tutorials/mothur-miseq-sop-short/tutorial.md index 480d93b3c8fc81..13040fcdb8c28b 100644 --- a/topics/microbiome/tutorials/mothur-miseq-sop-short/tutorial.md +++ b/topics/microbiome/tutorials/mothur-miseq-sop-short/tutorial.md @@ -32,6 +32,29 @@ edam_ontology: - topic_0637 # Taxonomy - topic_0080 # Sequence analysis - topic_4038 # Metabarcoding + +recordings: + - youtube_id: CGSBeCW3rpA # request upload to the GTN YouTube channel here + date: '2023-05-19' + length: 1H37M + speakers: + - shiltemann + captioners: + - shiltemann + galaxy_version: '23.01' # galaxy version can be found under Help->About inside Galaxy + - youtube_id: mto4Nl-q7Kk + date: '2021-02-15' + speakers: + - shiltemann + captioners: + - shiltemann + galaxy_version: '21.01' + length: 1H40M + description: This recording was created as part of the Smörgåsbord 2023 event. 
+ type: Lecture & Tutorial + archive-id: smorgasbord2023 + + --- {% include topics/microbiome/tutorials/mothur-miseq-sop/content.md short=true %} diff --git a/topics/microbiome/tutorials/mothur-miseq-sop/content.md b/topics/microbiome/tutorials/mothur-miseq-sop/content.md index bd3389301cddd7..200d205b8af343 100644 --- a/topics/microbiome/tutorials/mothur-miseq-sop/content.md +++ b/topics/microbiome/tutorials/mothur-miseq-sop/content.md @@ -5,6 +5,7 @@ redirect_from: - /topics/metagenomics/tutorials/mothur-miseq-sop-short/content --- + {% if include.short %} {% assign other_tutorial = "../mothur-miseq-sop/tutorial.html" %} {% assign other_tutorial_name = "extended" %} diff --git a/topics/microbiome/tutorials/pathogen-detection-from-nanopore-foodborne-data/tutorial.md b/topics/microbiome/tutorials/pathogen-detection-from-nanopore-foodborne-data/tutorial.md index 18647f7df36a8e..cf58ca2b901dbb 100644 --- a/topics/microbiome/tutorials/pathogen-detection-from-nanopore-foodborne-data/tutorial.md +++ b/topics/microbiome/tutorials/pathogen-detection-from-nanopore-foodborne-data/tutorial.md @@ -44,6 +44,17 @@ edam_ontology: - topic_0196 # Sequence assembly - topic_0634 # Pathology - topic_0080 # Sequence analysis + +recordings: +- youtube_id: gQHb_jkj-Z0 + date: '2023-05-01' + galaxy_version: '23.01' + length: 1H45M + speakers: + - EngyNasr + captioners: + - EngyNasr + --- diff --git a/topics/microbiome/tutorials/plasmid-metagenomics-nanopore/tutorial.md b/topics/microbiome/tutorials/plasmid-metagenomics-nanopore/tutorial.md index ade73bff58d10a..e9a25e041a3ca7 100644 --- a/topics/microbiome/tutorials/plasmid-metagenomics-nanopore/tutorial.md +++ b/topics/microbiome/tutorials/plasmid-metagenomics-nanopore/tutorial.md @@ -39,6 +39,17 @@ edam_ontology: - topic_3324 # Infectious disease - topic_0080 # Sequence analysis - topic_4013 # Antimicrobial resistance + +recordings: +- captioners: + - bebatut + date: '2021-02-15' + galaxy_version: '21.01' + length: 45M + youtube_id: ECIl8ZU1Wko + speakers: + - willemdek11 + --- # Overview diff --git a/topics/microbiome/tutorials/taxonomic-profiling/tutorial.md b/topics/microbiome/tutorials/taxonomic-profiling/tutorial.md index 2ce33a274d8c03..02dffb23bd5d19 100644 --- a/topics/microbiome/tutorials/taxonomic-profiling/tutorial.md +++ b/topics/microbiome/tutorials/taxonomic-profiling/tutorial.md @@ -38,6 +38,17 @@ edam_ontology: - topic_3697 # Microbial ecology - topic_0637 # Taxonomy - topic_0080 # Sequence analysis + +recordings: +- youtube_id: jszMnBKm6ig + speakers: + - bebatut + captioners: + - sophia120199 + date: '2023-05-16' + length: 1H15M + galaxy_version: '23.01' + --- diff --git a/topics/proteomics/tutorials/encyclopedia/tutorial.md b/topics/proteomics/tutorials/encyclopedia/tutorial.md index 61b0645c77ea9b..1c37e34b52c020 100644 --- a/topics/proteomics/tutorials/encyclopedia/tutorial.md +++ b/topics/proteomics/tutorials/encyclopedia/tutorial.md @@ -25,6 +25,17 @@ contributors: - timothygriffin + +recordings: +- captioners: + - mtekman + date: '2021-06-25' + galaxy_version: '21.01' + length: 30M + youtube_id: eCEjHtbZLVg + speakers: + - emmaleith + --- diff --git a/topics/proteomics/tutorials/introduction/slides.html b/topics/proteomics/tutorials/introduction/slides.html index 9e0491cbefbaf7..22276228621dc9 100644 --- a/topics/proteomics/tutorials/introduction/slides.html +++ b/topics/proteomics/tutorials/introduction/slides.html @@ -11,6 +11,17 @@ contributors: - foellmelanie + +recordings: +- captioners: + - foellmelanie + date: '2021-02-15' + 
galaxy_version: '21.01' + length: 20M + youtube_id: 2C96AvrFT38 + speakers: + - awspolly + --- # Proteomics diff --git a/topics/proteomics/tutorials/maxquant-msstats-dda-lfq/tutorial.md b/topics/proteomics/tutorials/maxquant-msstats-dda-lfq/tutorial.md index e2e0755dd20257..f866dae339a37f 100644 --- a/topics/proteomics/tutorials/maxquant-msstats-dda-lfq/tutorial.md +++ b/topics/proteomics/tutorials/maxquant-msstats-dda-lfq/tutorial.md @@ -24,6 +24,17 @@ requirements: - maxquant-label-free subtopic: id-quant tags: [label-free] + +recordings: +- captioners: + - foellmelanie + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H + youtube_id: IXdLAt2PAT4 + speakers: + - foellmelanie + --- diff --git a/topics/proteomics/tutorials/metaproteomics/tutorial.md b/topics/proteomics/tutorials/metaproteomics/tutorial.md index 5d87eb22826422..75a72eac9d9357 100644 --- a/topics/proteomics/tutorials/metaproteomics/tutorial.md +++ b/topics/proteomics/tutorials/metaproteomics/tutorial.md @@ -27,6 +27,17 @@ edam_ontology: - topic_3922 # Proteogenomics - topic_3050 # Biodiversity - topic_0637 # Taxonomy + +recordings: +- captioners: + - emmaleith + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H + youtube_id: 3_yaPp-RCFw + speakers: + - pratikdjagtap + --- In this metaproteomics tutorial we will identify expressed proteins from a complex bacterial community sample. diff --git a/topics/proteomics/tutorials/proteogenomics-dbcreation/tutorial.md b/topics/proteomics/tutorials/proteogenomics-dbcreation/tutorial.md index 3fc23adc9473e1..3a93daf078fc70 100644 --- a/topics/proteomics/tutorials/proteogenomics-dbcreation/tutorial.md +++ b/topics/proteomics/tutorials/proteogenomics-dbcreation/tutorial.md @@ -28,6 +28,17 @@ contributors: - pravs3683 subtopic: multi-omics tags: [proteogenomics] + +recordings: +- captioners: + - emmaleith + date: '2021-02-15' + galaxy_version: '21.01' + length: 40M + youtube_id: b_kZf8mXHdo + speakers: + - jj-umn + --- diff --git a/topics/proteomics/tutorials/proteogenomics-dbsearch/tutorial.md b/topics/proteomics/tutorials/proteogenomics-dbsearch/tutorial.md index a587768e803b6c..00ab9c0e1ba4d3 100644 --- a/topics/proteomics/tutorials/proteogenomics-dbsearch/tutorial.md +++ b/topics/proteomics/tutorials/proteogenomics-dbsearch/tutorial.md @@ -33,6 +33,17 @@ contributors: - pravs3683 subtopic: multi-omics tags: [proteogenomics] + +recordings: +- captioners: + - emmaleith + date: '2021-02-15' + galaxy_version: '21.01' + length: 30M + youtube_id: q1OjmTcbvBA + speakers: + - andrewr + --- diff --git a/topics/proteomics/tutorials/proteogenomics-novel-peptide-analysis/tutorial.md b/topics/proteomics/tutorials/proteogenomics-novel-peptide-analysis/tutorial.md index 90b3026b744358..67be973d456fc5 100644 --- a/topics/proteomics/tutorials/proteogenomics-novel-peptide-analysis/tutorial.md +++ b/topics/proteomics/tutorials/proteogenomics-novel-peptide-analysis/tutorial.md @@ -29,6 +29,17 @@ contributors: - pravs3683 subtopic: multi-omics tags: [proteogenomics] + +recordings: +- captioners: + - subinamehta + date: '2021-02-15' + galaxy_version: '21.01' + length: 40M + youtube_id: Ku274KwFh1Y + speakers: + - subinamehta + --- diff --git a/topics/proteomics/tutorials/proteome_annotation/tutorial.md b/topics/proteomics/tutorials/proteome_annotation/tutorial.md index 81c9456cff7e41..d662b868ab108f 100644 --- a/topics/proteomics/tutorials/proteome_annotation/tutorial.md +++ b/topics/proteomics/tutorials/proteome_annotation/tutorial.md @@ -29,6 +29,17 @@ contributors: subtopic: post-process 
tags: [DDA, human] + +recordings: +- captioners: + - combesf + date: '2021-02-15' + galaxy_version: '21.01' + length: 30M + youtube_id: qG7GZc6BeGY + speakers: + - yvesvdb + --- diff --git a/topics/sequence-analysis/tutorials/mapping/slides.html b/topics/sequence-analysis/tutorials/mapping/slides.html index 0d495a3ff14def..cff4630af32b3c 100644 --- a/topics/sequence-analysis/tutorials/mapping/slides.html +++ b/topics/sequence-analysis/tutorials/mapping/slides.html @@ -25,6 +25,17 @@ - EngyNasr - gallardoalba - gallantries + +recordings: +- captioners: + - blankenberg + date: '2021-02-15' + galaxy_version: '21.01' + length: 10M + youtube_id: 7FhHb8EV3EU + speakers: + - pvanheus + --- # Example NGS pipeline diff --git a/topics/sequence-analysis/tutorials/mapping/tutorial.md b/topics/sequence-analysis/tutorials/mapping/tutorial.md index 513c7ba31f5518..2991fa82364f5d 100644 --- a/topics/sequence-analysis/tutorials/mapping/tutorial.md +++ b/topics/sequence-analysis/tutorials/mapping/tutorial.md @@ -40,6 +40,17 @@ contributors: - joachimwolff - bebatut - hexylena + +recordings: +- captioners: + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 20M + youtube_id: 1wm-62E2NkY + speakers: + - pvanheus + --- Sequencing produces a collection of sequences without genomic context. We do not know which part of the genome the sequences correspond to. Mapping the reads of an experiment to a reference genome is a key step in modern genomic data analysis. With mapping, the reads are assigned to a specific location in the genome, and insights like the expression level of genes can be gained. diff --git a/topics/sequence-analysis/tutorials/quality-control/slides.html b/topics/sequence-analysis/tutorials/quality-control/slides.html index aa98fea033fbe1..2559bd2c76ea02 100644 --- a/topics/sequence-analysis/tutorials/quality-control/slides.html +++ b/topics/sequence-analysis/tutorials/quality-control/slides.html @@ -29,6 +29,19 @@ - stephanierobin - gallantries + +recordings: +- captioners: + - ennovytje + - nagoue + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 40M + youtube_id: BWonTPS4zB8 + speakers: + - heylf + --- # Why Quality Control? 
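The `recordings` blocks added throughout this pull request follow a small front-matter schema: `youtube_id`, `date`, `speakers`, `captioners`, and `length`, plus optional fields such as `galaxy_version`, `description`, `type`, and `archive-id`. As a rough sketch of how such an entry could be consumed downstream — the field list here is inferred from the hunks above rather than taken from any schema definition, and PyYAML is assumed to be available — the `1H30M`-style `length` strings are the only part that needs custom parsing:

```python
import re
import yaml  # PyYAML; assumed available, not pinned by this PR

# One entry in the shape this PR adds to tutorial front matter
# (field list inferred from the hunks above, not from a schema file).
ENTRY = """
- youtube_id: BWonTPS4zB8
  date: '2021-02-15'
  galaxy_version: '21.01'
  length: 40M
  speakers:
  - heylf
  captioners:
  - ennovytje
  - nagoue
  - hexylena
"""

def length_to_minutes(length):
    """Convert a GTN-style length such as '1H30M', '1H', or '40M' to minutes."""
    match = re.fullmatch(r"(?:(\d+)H)?(?:(\d+)M)?", str(length))
    if not match or not any(match.groups()):
        raise ValueError(f"unparseable length: {length!r}")
    hours, minutes = (int(part) if part else 0 for part in match.groups())
    return 60 * hours + minutes

for recording in yaml.safe_load(ENTRY):
    required = {"youtube_id", "date", "speakers", "captioners", "length"}
    missing = required - recording.keys()
    print(recording["youtube_id"], length_to_minutes(recording["length"]), "min",
          "missing fields:", sorted(missing) or "none")
```

Everything else in an entry is plain YAML, so a sketch like this is mostly a matter of checking that the required keys are present.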
diff --git a/topics/sequence-analysis/tutorials/quality-control/tutorial.md b/topics/sequence-analysis/tutorials/quality-control/tutorial.md index e4331fa1cc49c8..2e1d2131178dce 100644 --- a/topics/sequence-analysis/tutorials/quality-control/tutorial.md +++ b/topics/sequence-analysis/tutorials/quality-control/tutorial.md @@ -38,6 +38,26 @@ contributors: - gallantries - neoformit + +recordings: +- youtube_id: coaMGvZazoc + length: 50M + speakers: + - LonsBio + captioners: + - LonsBio + date: '2023-05-19' +- captioners: + - bebatut + - nagoue + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H10M + youtube_id: QJRlX2hWDKM + speakers: + - heylf + --- diff --git a/topics/single-cell/tutorials/bulk-music/tutorial.md b/topics/single-cell/tutorials/bulk-music/tutorial.md index ad124379184ecb..6c98cc13f45bf9 100644 --- a/topics/single-cell/tutorials/bulk-music/tutorial.md +++ b/topics/single-cell/tutorials/bulk-music/tutorial.md @@ -38,6 +38,18 @@ follow_up_training: gitter: Galaxy-Training-Network/galaxy-single-cell + +recordings: +- captioners: + - mtekman + - nomadscientist + date: '2021-03-08' + galaxy_version: '21.09' + length: 20M + youtube_id: nfAUXP3W-jM + speakers: + - nomadscientist + --- diff --git a/topics/single-cell/tutorials/scrna-case_JUPYTER-trajectories/tutorial.md b/topics/single-cell/tutorials/scrna-case_JUPYTER-trajectories/tutorial.md index c833f18233f5b4..b975edfb47c624 100644 --- a/topics/single-cell/tutorials/scrna-case_JUPYTER-trajectories/tutorial.md +++ b/topics/single-cell/tutorials/scrna-case_JUPYTER-trajectories/tutorial.md @@ -47,6 +47,17 @@ notebook: language: python snippet: topics/single-cell/tutorials/scrna-case_JUPYTER-trajectories/preamble.md + +recordings: +- captioners: + - nomadscientist + date: '2021-02-15' + galaxy_version: '21.01' + length: 10M + youtube_id: VEyTmwDIgPI + speakers: + - nomadscientist + --- # Run the tutorial! diff --git a/topics/single-cell/tutorials/scrna-case_alevin-combine-datasets/tutorial.md b/topics/single-cell/tutorials/scrna-case_alevin-combine-datasets/tutorial.md index 6ea94ec8ff6442..863c40b562baea 100644 --- a/topics/single-cell/tutorials/scrna-case_alevin-combine-datasets/tutorial.md +++ b/topics/single-cell/tutorials/scrna-case_alevin-combine-datasets/tutorial.md @@ -70,6 +70,24 @@ follow_up_training: topic_name: single-cell tutorials: - scrna-case_basic-pipeline + +recordings: +- youtube_id: 22t-4qvHnow + length: 11M + date: '2023-05-09' + speakers: + - hrukkudyr + captioners: + - hrukkudyr +- captioners: + - nomadscientist + date: '2021-02-15' + galaxy_version: '21.09' + length: 11M + youtube_id: U8pVa6csmUE + speakers: + - nomadscientist + --- diff --git a/topics/single-cell/tutorials/scrna-case_alevin/tutorial.md b/topics/single-cell/tutorials/scrna-case_alevin/tutorial.md index 5b07779d05bdc6..d289b2d30693c0 100644 --- a/topics/single-cell/tutorials/scrna-case_alevin/tutorial.md +++ b/topics/single-cell/tutorials/scrna-case_alevin/tutorial.md @@ -66,6 +66,17 @@ follow_up_training: topic_name: single-cell tutorials: - scrna-case_alevin-combine-datasets + +recordings: +- captioners: + - nomadscientist + date: '2021-02-15' + galaxy_version: '21.01' + length: 30M + youtube_id: 3ytm2AU6QUc + speakers: + - nomadscientist + --- @@ -250,7 +261,7 @@ We can now run Alevin. 
In some public instances, Alevin won't show up if you sea > > > - In *"Extra output files"*: > > > - {% icon param-check %} `Salmon Quant log file` > > > - {% icon param-check %} `Features used by the CB classification and their counts at each cell level (--dumpFeatures)` -> > > +> > > > > > - Of course you are welcome to select more options and explore the output files ({% icon warning %} warning: *"Per cell level parsimonious Umi graph (--dumpUmiGraph)"* will generate over 2 thousand single files), but for this tutorial you will only need to select those specified. > > > - In *"Advanced options"*: > > > - *"Dump cell v transcripts count matrix in MTX format"*: {% icon galaxy-toggle%} `Yes` diff --git a/topics/single-cell/tutorials/scrna-case_basic-pipeline/tutorial.md b/topics/single-cell/tutorials/scrna-case_basic-pipeline/tutorial.md index 193d495db34ddf..d28bd7bfbc8f6a 100644 --- a/topics/single-cell/tutorials/scrna-case_basic-pipeline/tutorial.md +++ b/topics/single-cell/tutorials/scrna-case_basic-pipeline/tutorial.md @@ -66,6 +66,17 @@ follow_up_training: tutorials: - scrna-case_JUPYTER-trajectories - scrna-case_monocle3-trajectories + +recordings: +- captioners: + - nomadscientist + date: '2021-02-15' + galaxy_version: '21.01' + length: 30M + youtube_id: M6iepSJh0EQ + speakers: + - nomadscientist + --- diff --git a/topics/single-cell/tutorials/scrna-case_cell-cycle/tutorial.md b/topics/single-cell/tutorials/scrna-case_cell-cycle/tutorial.md index dfeb597e2592d5..c68f95e011ea75 100644 --- a/topics/single-cell/tutorials/scrna-case_cell-cycle/tutorial.md +++ b/topics/single-cell/tutorials/scrna-case_cell-cycle/tutorial.md @@ -34,6 +34,7 @@ contributions: testing: - hrukkudyr + --- diff --git a/topics/single-cell/tutorials/scrna-case_monocle3-trajectories/tutorial.md b/topics/single-cell/tutorials/scrna-case_monocle3-trajectories/tutorial.md index d097b2d4d6445b..cc4ca33cd38eb4 100644 --- a/topics/single-cell/tutorials/scrna-case_monocle3-trajectories/tutorial.md +++ b/topics/single-cell/tutorials/scrna-case_monocle3-trajectories/tutorial.md @@ -59,6 +59,16 @@ contributions: - nomadscientist funding: - epsrc-training-grant + +recordings: +- youtube_id: Espl6qSbu3Y + date: '2023-04-11' + length: 15M + speakers: + - wee-snufkin + captioners: + - wee-snufkin + --- diff --git a/topics/single-cell/tutorials/scrna-intro/slides.html b/topics/single-cell/tutorials/scrna-intro/slides.html index 61a6b76bb83e5e..c740f125183328 100644 --- a/topics/single-cell/tutorials/scrna-intro/slides.html +++ b/topics/single-cell/tutorials/scrna-intro/slides.html @@ -40,6 +40,17 @@ contributors: - mtekman + +recordings: +- captioners: + - mtekman + date: '2021-02-15' + galaxy_version: '21.01' + length: 20M + youtube_id: D3qvYWZMFa0 + speakers: + - awspolly + --- # Single-cell RNA-seq diff --git a/topics/single-cell/tutorials/scrna-plant/tutorial.md b/topics/single-cell/tutorials/scrna-plant/tutorial.md index 333e1a6fae10ff..b5c67cbbc74a2a 100644 --- a/topics/single-cell/tutorials/scrna-plant/tutorial.md +++ b/topics/single-cell/tutorials/scrna-plant/tutorial.md @@ -37,6 +37,17 @@ contributors: gitter: Galaxy-Training-Network/galaxy-single-cell + +recordings: +- captioners: + - mtekman + date: '2021-02-15' + galaxy_version: '21.01' + length: 55M + youtube_id: yKlJ5ESri7o + speakers: + - mtekman + --- diff --git a/topics/single-cell/tutorials/scrna-preprocessing-tenx/tutorial.md b/topics/single-cell/tutorials/scrna-preprocessing-tenx/tutorial.md index abd060bc47ace2..8bf1f96f0c83f2 100644 --- 
a/topics/single-cell/tutorials/scrna-preprocessing-tenx/tutorial.md +++ b/topics/single-cell/tutorials/scrna-preprocessing-tenx/tutorial.md @@ -45,6 +45,25 @@ contributors: gitter: Galaxy-Training-Network/galaxy-single-cell + +recordings: +- youtube_id: IIiOcgiXviE + speakers: + - pavanvidem + captioners: + - pavanvidem + date: '2023-05-19' + length: 19M + galaxy_version: '23.01' +- captioners: + - khanteymoori + date: '2021-03-18' + galaxy_version: '21.01' + length: 5M + youtube_id: vNBNFkF0L4U + speakers: + - nomadscientist + --- diff --git a/topics/single-cell/tutorials/scrna-scanpy-pbmc3k/tutorial.md b/topics/single-cell/tutorials/scrna-scanpy-pbmc3k/tutorial.md index 44cabbda8b5bfe..972f9d4a0d9396 100644 --- a/topics/single-cell/tutorials/scrna-scanpy-pbmc3k/tutorial.md +++ b/topics/single-cell/tutorials/scrna-scanpy-pbmc3k/tutorial.md @@ -42,6 +42,16 @@ contributors: gitter: Galaxy-Training-Network/galaxy-single-cell + +recordings: +- captioners: + - hrhotz + date: '2021-03-18' + length: 45M + youtube_id: nefB35Bi1l4 + speakers: + - nomadscientist + --- diff --git a/topics/single-cell/tutorials/scrna-umis/tutorial.md b/topics/single-cell/tutorials/scrna-umis/tutorial.md index 461cfc1b100906..9cb66a8755176b 100644 --- a/topics/single-cell/tutorials/scrna-umis/tutorial.md +++ b/topics/single-cell/tutorials/scrna-umis/tutorial.md @@ -26,6 +26,17 @@ contributors: gitter: Galaxy-Training-Network/galaxy-single-cell + +recordings: +- captioners: + - khanteymoori + date: '2021-03-18' + galaxy_version: '21.01' + length: 10M + youtube_id: AJ17BicjmYU + speakers: + - nomadscientist + --- diff --git a/topics/statistics/tutorials/CNN/tutorial.md b/topics/statistics/tutorials/CNN/tutorial.md index 283d760ec1d42f..1361fc2a6f88a8 100644 --- a/topics/statistics/tutorials/CNN/tutorial.md +++ b/topics/statistics/tutorials/CNN/tutorial.md @@ -23,6 +23,16 @@ time_estimation: 2H contributors: - kxk302 +recordings: +- captioners: + - FilipposZ + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H + youtube_id: P1NVYOJrv_4 + speakers: + - kxk302 + --- @@ -159,7 +169,7 @@ $$ \frac{(\text{input size} - \text{(filter size + (filter size -1)*(dilation - Figure 7 illustrates the calculations for a convolution operation, via a 3 by 3 filter on a single channel 5 by 5 input vector (5 x 5 x 1). Figure 8 illustrates the calculations when the input vector has 3 channels (5 x 5 x 3). To show this in 2 dimensions, we are displaying each channel in input -vector and filter separately. Figure 9 shows a sample multi-channel 2D convolution in 3 dimensions. +vector and filter separately. Figure 9 shows a sample multi-channel 2D convolution in 3 dimensions. 
![Three matrices representing an input vector and another three matrices representing a filter, along with calculation for multiple input channel two dimensional convolution operation ](../../images/Conv_multiple_input_channel.png "Illustration of multiple input channel two dimensional convolution") diff --git a/topics/statistics/tutorials/FNN/tutorial.md b/topics/statistics/tutorials/FNN/tutorial.md index b3b2f9b5e99ce6..a5f5ec90d37307 100644 --- a/topics/statistics/tutorials/FNN/tutorial.md +++ b/topics/statistics/tutorials/FNN/tutorial.md @@ -27,6 +27,17 @@ follow_up_training: time_estimation: 2H contributors: - kxk302 + +recordings: +- captioners: + - kxk302 + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H10M + youtube_id: VbzJDkyPL4A + speakers: + - kxk302 + --- diff --git a/topics/statistics/tutorials/RNN/tutorial.md b/topics/statistics/tutorials/RNN/tutorial.md index 4e35a5daa0a29a..eb290f55d5cea8 100644 --- a/topics/statistics/tutorials/RNN/tutorial.md +++ b/topics/statistics/tutorials/RNN/tutorial.md @@ -28,6 +28,15 @@ time_estimation: 2H contributors: - kxk302 +recordings: +- captioners: + - kxk302 + date: '2021-02-15' + galaxy_version: '21.01' + length: 50M + youtube_id: 1dwzEhjOAmw + speakers: + - kxk302 --- diff --git a/topics/statistics/tutorials/classification_machinelearning/tutorial.md b/topics/statistics/tutorials/classification_machinelearning/tutorial.md index 004e85dfe6a694..afcc3d875a41ec 100755 --- a/topics/statistics/tutorials/classification_machinelearning/tutorial.md +++ b/topics/statistics/tutorials/classification_machinelearning/tutorial.md @@ -21,6 +21,17 @@ contributors: - khanteymoori - anuprulez - simonbray + +recordings: +- captioners: + - anuprulez + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H50M + youtube_id: Gz2OdRPS2Nk + speakers: + - anuprulez + --- diff --git a/topics/statistics/tutorials/fruit_360/tutorial.md b/topics/statistics/tutorials/fruit_360/tutorial.md index 51a1d3c2ecc1c7..ec85d26d5c89a0 100644 --- a/topics/statistics/tutorials/fruit_360/tutorial.md +++ b/topics/statistics/tutorials/fruit_360/tutorial.md @@ -18,19 +18,28 @@ time_estimation: 2H contributors: - kxk302 +recordings: +- captioners: + - kxk302 + date: '2022-01-19' + galaxy_version: '21.09' + length: 1H + youtube_id: EX8gZ1lxhNY + speakers: + - kxk302 --- -The classification of fruits and vegetables offers many useful applications such as -automated harvesting by robots, building up stocks for supermarkets, effective detection -of specific defects, and determining fruit ripeness ({% cite Duong2020 %}, -{% cite NaranjoTorres2020 %}, {% cite Iswari2017 %}). Machine Learning (ML) techniques -such as Deep Learning (DL) are commonly used for image classification problems in various -domains, including in agriculture ({% cite Kamilaris2018 %}). DL is a technique inspired -by how a human brain operates. Due to the increased availability of compute capacity and +The classification of fruits and vegetables offers many useful applications such as +automated harvesting by robots, building up stocks for supermarkets, effective detection +of specific defects, and determining fruit ripeness ({% cite Duong2020 %}, +{% cite NaranjoTorres2020 %}, {% cite Iswari2017 %}). Machine Learning (ML) techniques +such as Deep Learning (DL) are commonly used for image classification problems in various +domains, including in agriculture ({% cite Kamilaris2018 %}). DL is a technique inspired +by how a human brain operates. 
Due to the increased availability of compute capacity and training data, DL techniques have become very popular in recent years. In this tutorial, -we will use Galaxy's ML toolkit to build a DL model to classify fruit and vegetable -images. Our DL model is trained and evaluated on Fruit 360 dataset ({% cite Murean2018 %}) +we will use Galaxy's ML toolkit to build a DL model to classify fruit and vegetable +images. Our DL model is trained and evaluated on the Fruit 360 dataset ({% cite Murean2018 %}). > > @@ -42,13 +51,13 @@ images. Our DL model is trained and evaluated on Fruit 360 dataset ({% cite Mure {: .agenda} ## Overview of convolutional neural networks (CNN) -Here we provide a brief overview of CNN. For a more in depth discussion, please refer to the CNN tutorial cited in the requirements -section. CNN were inspired by how the visual cortex of the brain processes visual information ({% cite HubelWiesel %}). There are +Here we provide a brief overview of CNNs. For a more in-depth discussion, please refer to the CNN tutorial cited in the requirements +section. CNNs were inspired by how the visual cortex of the brain processes visual information ({% cite HubelWiesel %}). There are two types of cells in our visual cortex: **simple cells** detect objects at certain angles/locations, and **complex** cells, -which receive inputs from multiple simple cells, and detect movement. In 1980, inspired by hierarchical structure of complex and -simple cells, Fukushima proposed *Neocognitron* ({% cite Fukishima %}), a hierarchical neural network used for handwritten Japanese -character recognition. In 1989, LeCun et. al. ({% cite LeCunEtAl %}) proposed a CNN that could be trained by backpropagation -algorithm. CNN gained immense popularity when they outperformed other models at ImageNet Challenge, a competition in object +which receive inputs from multiple simple cells and detect movement. In 1980, inspired by the hierarchical structure of complex and +simple cells, Fukushima proposed *Neocognitron* ({% cite Fukishima %}), a hierarchical neural network used for handwritten Japanese +character recognition. In 1989, LeCun et al. ({% cite LeCunEtAl %}) proposed a CNN that could be trained by the backpropagation +algorithm. CNNs gained immense popularity when they outperformed other models at the ImageNet Challenge, a competition in object classification and detection on hundreds of object categories and millions of images. ## Architecture of CNN @@ -69,17 +78,17 @@ The input layer represents the input to the CNN. An example input, could be a 28 ### Convolution layer -The convolution layer is composed of multiple **filters** (also called **kernels**). Filters for a 2D image are also 2D. Suppose -we have a 28 by 28 pixel grayscale image. 
Each pixel is represented by a number between 0 and 255, where 0 represents the color +black, 255 represents the color white, and the values in between represent different shades of gray. Suppose we have a 3 by 3 +filter (9 values in total), and the values are randomly set to 0 or 1. Convolution is the process of placing the 3 by 3 filter +on the top left corner of the image, multiplying filter values by the pixel values and adding the results, moving the filter to +the right one pixel at a time and repeating this process (Figure 1). When we get to the top right corner of the image, we simply move the filter down one pixel and restart from the left. This process ends when we get to the bottom right corner of the image. ![A 3 by 3 filter applied to a 4 by 4 image, resulting in a 2 by 2 image](../../images/Conv_no_padding_no_strides.gif "A 3 by 3 filter applied to a 4 by 4 image, resulting in a 2 by 2 image ({% cite DumoulinVisin %})") -Covolution operator has several parameters. +The convolution operator has several parameters. 1. Filter size 2. Padding @@ -87,97 +96,97 @@ Covolution operator has several parameters. 4. Dilation 5. Activation function -Filter size can be 5 by 5, 3 by 3, and so on. Larger filter sizes should be avoided as more weights need to be learned (more -compute capacity, more training time, more chance of overfitting). Also, odd sized filters are preferred to even sized filters, +Filter size can be 5 by 5, 3 by 3, and so on. Larger filter sizes should be avoided as more weights need to be learned (more +compute capacity, more training time, more chance of overfitting). Also, odd-sized filters are preferred to even-sized filters, due to the nice geometric property of all the input pixels being around the output pixel. -If you look at Figure 1 you see that after applying a 3 by 3 filter to a 4 by 4 image, we end up with a 2 by 2 image -- the -size of the image has gone down. If we want to keep the image size the same, we can use *padding* (Figure 2). We pad the input -in every direction with 0's before applying the filter. If the padding is 1 by 1, then we add 1 zero in evey direction. If its +If you look at Figure 1, you see that after applying a 3 by 3 filter to a 4 by 4 image, we end up with a 2 by 2 image -- the +size of the image has gone down. If we want to keep the image size the same, we can use *padding* (Figure 2). We pad the input +in every direction with 0's before applying the filter. If the padding is 1 by 1, then we add 1 zero in every direction. If it's 2 by 2, then we add 2 zeros in every direction, and so on. ![A 3 by 3 filter applied to a 5 by 5 image, with padding of 1, resulting in a 5 by 5 image](../../images/Conv_same_padding_no_strides.gif "A 3 by 3 filter applied to a 5 by 5 image, with padding of 1, resulting in a 5 by 5 image ({% cite DumoulinVisin %})") -As mentioned before, we start the convolution by placing the filter on the top left corner of the image, and after multiplying -filter and image values (and adding them), we move the filter to the right and repeat the process. How many pixels we move to -the right (or down) is the *stride*. +As mentioned before, we start the convolution by placing the filter on the top left corner of the image, and after multiplying +filter and image values (and adding them), we move the filter to the right and repeat the process. How many pixels we move to +the right (or down) is the *stride*. 
In Figures 1 and 2, the stride of the filter is 1. We move the filter one pixel to the right (or down). But we could use a different stride. Figure 3 shows an example of using a stride of 2. ![A 3 by 3 filter applied to a 5 by 5 image, with stride of 2, resulting in a 2 by 2 image](../../images/Conv_no_padding_strides.gif "A 3 by 3 filter applied to a 5 by 5 image, with stride of 2, resulting in a 2 by 2 image ({% cite DumoulinVisin %})") -When we apply a, say 3 by 3, filter to an image, our filter's output is affected by pixels in a 3 by 3 subset of the image. If we -like to have a larger *receptive field* (portion of image that affect filter's output), we could use *dilation*. If we set the -dilation to 2 (Figure 4), instead of a contiguous 3 by 3 subset of the image, every other pixel of a 5 by 5 subset of the image +When we apply a, say 3 by 3, filter to an image, our filter's output is affected by pixels in a 3 by 3 subset of the image. If we +would like to have a larger *receptive field* (the portion of the image that affects the filter's output), we could use *dilation*. If we set the +dilation to 2 (Figure 4), instead of a contiguous 3 by 3 subset of the image, every other pixel of a 5 by 5 subset of the image affects the filter's output. ![A 3 by 3 filter applied to a 7 by 7 image, with dilation of 2, resulting in a 3 by 3 image](../../images/Conv_dilation.gif "A 3 by 3 filter applied to a 7 by 7 image, with dilation of 2, resulting in a 3 by 3 image ({% cite DumoulinVisin %})") -After the filter scans the whole image, we apply an activation function to filter output to introduce non-linearlity. The preferred -activation function used in CNN is ReLU ({% cite NwankpaEtAl %}). ReLU leaves pixels with positive values in filter output as is, +After the filter scans the whole image, we apply an activation function to the filter output to introduce non-linearity. The preferred +activation function used in CNNs is ReLU ({% cite NwankpaEtAl %}). ReLU leaves pixels with positive values in the filter output as is, and replaces negative values with 0. Figure 5 shows the results of applying the ReLU activation function to a filter output. ![Two matrices representing filter output before and after ReLU activation function is applied](../../images/Conv_ReLU.png "Applying ReLU activation function to filter output") ![One matrix representing an input and another matrix representing a filter, along with calculation for single input channel two dimensional convolution operation](../../images/Conv_single_input_channel.png "Illustration of single input channel two dimensional convolution") -Figure 6 illustrates the calculations for a convolution operation, via a 3 by 3 filter on a single channel 5 by 5 input -(5 x 5 x 1). Figure 7 illustrates the calculations when the input has 3 channels. To show this in 2 dimensions, we are -displaying each channel in input and filter separately. Figure 9 shows a sample multi-channel 2D convolution in 3 dimensions. +Figure 6 illustrates the calculations for a convolution operation, via a 3 by 3 filter on a single channel 5 by 5 input +(5 x 5 x 1). Figure 7 illustrates the calculations when the input has 3 channels. To show this in 2 dimensions, we are +displaying each channel in the input and filter separately. Figure 8 shows a sample multi-channel 2D convolution in 3 dimensions. 
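Filter size, padding, stride, and dilation together determine the spatial size of a convolution's output. The following minimal Python sketch — purely illustrative, not part of the tutorial's Galaxy workflow — reproduces the output sizes shown in Figures 1 through 4:

```python
def conv_output_size(input_size, filter_size, padding=0, stride=1, dilation=1):
    """Output size along one spatial dimension of a 2D convolution."""
    # Dilation spreads the filter out: a 3x3 filter with dilation 2 covers a 5x5 window.
    effective_filter = filter_size + (filter_size - 1) * (dilation - 1)
    return (input_size - effective_filter + 2 * padding) // stride + 1

print(conv_output_size(4, 3))             # Figure 1: 3x3 filter on a 4x4 image -> 2
print(conv_output_size(5, 3, padding=1))  # Figure 2: padding of 1 keeps 5x5    -> 5
print(conv_output_size(5, 3, stride=2))   # Figure 3: stride of 2               -> 2
print(conv_output_size(7, 3, dilation=2)) # Figure 4: dilation of 2 on 7x7      -> 3
```

With the defaults (no padding, stride 1, dilation 1) the expression reduces to `input_size - filter_size + 1`, which is why the 3 by 3 filter shrinks a 4 by 4 image to 2 by 2.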
![Three matrices representing an input and another three matrices representing a filter, along with calculation for multiple input channel two dimensional convolution operation ](../../images/Conv_multiple_input_channel.png "Illustration of multiple input channel two dimensional convolution") -As Figures 7 and 8 show the output of a multi-channel 2D filter is a single channel 2D image. Applying *multiple* filters to the -input image results in a multi-channel 2D image for the output. For example, if the input image is 28 by 28 by 3 -(rows x columns x channels), and we apply a 3 by 3 filter with 1 by 1 padding, we would get a 28 by 28 by 1 image. If we apply 15 -filters to the input image, our output would be 28 by 28 by 15. Hence, the number of filters in a convolution layer allows us to +As Figures 7 and 8 show, the output of a multi-channel 2D filter is a single channel 2D image. Applying *multiple* filters to the +input image results in a multi-channel 2D image for the output. For example, if the input image is 28 by 28 by 3 +(rows x columns x channels), and we apply a 3 by 3 filter with 1 by 1 padding, we would get a 28 by 28 by 1 image. If we apply 15 +filters to the input image, our output would be 28 by 28 by 15. Hence, the number of filters in a convolution layer allows us to increase or decrease the channel size. ![Multiple cubes representing input, filter, and output in a 3 channel 2 dimensional convolution operation](../../images/Conv_multiple_channel_3d.gif "Three dimensional illustration of multiple input channel two dimensional convolution (Source: https://thomelane.github.io/convolutions/2DConvRGB.html)") ### Pooling layer -The pooling layer performs down sampling to reduce the spatial dimensionality of the input. This decreases the number of parameters, -which in turn reduces the learning time and computation, and the likelihood of overfitting. The most popular type of pooling is -*max pooling*. Its usually a 2 by 2 filter with a stride of 2 that returns the maximum value as it slides over the input data, +The pooling layer performs downsampling to reduce the spatial dimensionality of the input. This decreases the number of parameters, +which in turn reduces the learning time and computation, and the likelihood of overfitting. The most popular type of pooling is +*max pooling*. It's usually a 2 by 2 filter with a stride of 2 that returns the maximum value as it slides over the input data, similar to convolution filters. ### Fully connected layer -The last layer in a CNN is a fully connected layer. We connect all the nodes from the previous layer to this fully connected layer, +The last layer in a CNN is a fully connected layer. We connect all the nodes from the previous layer to this fully connected layer, which is responsible for the classification of the image. ![A convolutional neural network with 3 convolution layers followed by 3 pooling layers](../../images/Conv_CNN.png "A convolutional neural network with 3 convolution layers followed by 3 pooling layers ({% cite OSheaEtAl %})") -As shown in Figure 9, a typical CNN usually has more than one convolution plus pooling layer. Each convolution plus pooling layer -is responsible for feature extraction at a different level of abstraction. For example, the filters in the first layer could detect -horizontal, vertical, and diagonal edges. The filters in the next layer could detect shapes, and the filters in the last layer could -detect collection of shapes. 
Filter values are randomly initialized and are learned by the learning algorithm. This makes CNN very +As shown in Figure 9, a typical CNN usually has more than one convolution plus pooling layer. Each convolution plus pooling layer +is responsible for feature extraction at a different level of abstraction. For example, the filters in the first layer could detect +horizontal, vertical, and diagonal edges. The filters in the next layer could detect shapes, and the filters in the last layer could +detect collection of shapes. Filter values are randomly initialized and are learned by the learning algorithm. This makes CNN very powerful as they not only do classification, but can also automatically do feature extraction. This distinguishes CNN from other classification techniques (like Support Vector Machines), which cannot do feature extraction. ## Fruit 360 dataset -Fruit 360 is a dataset with 90380 images of 131 fruits and vegetables -(https://www.kaggle.com/moltean/fruits). Images are 100 pixel by 100 pixel and are color -(RGB) images (Hence, 3 values for each pixel). There are 67,692 images in the training -dataset and 22,688 images in the test dataset. The dataset we use for this tutorial is a -subset of fruit 360 dataset, containing only 10 fruits/vegetables (Strawberry, -Apple_Red_Delicious, Pepper_Green, Corn, Banana, Tomato_1, Potato_White, Pineapple, -Orange, and Peach). We selected a subset of fruits/vegetables, so the dataset size is -smaller and the neural network can be trained faster. Our training dataset has 5,015 images +Fruit 360 is a dataset with 90380 images of 131 fruits and vegetables +(https://www.kaggle.com/moltean/fruits). Images are 100 pixel by 100 pixel and are color +(RGB) images (Hence, 3 values for each pixel). There are 67,692 images in the training +dataset and 22,688 images in the test dataset. The dataset we use for this tutorial is a +subset of fruit 360 dataset, containing only 10 fruits/vegetables (Strawberry, +Apple_Red_Delicious, Pepper_Green, Corn, Banana, Tomato_1, Potato_White, Pineapple, +Orange, and Peach). We selected a subset of fruits/vegetables, so the dataset size is +smaller and the neural network can be trained faster. Our training dataset has 5,015 images and our testing dataset has 1,679 images. -The utilities used to create the subset dataset, along with step by step instructions, can -be found here: https://github.com/kxk302/fruit_dataset_utilities. First, we created feature -vectors for each image. Images are 100 pixel by 100 pixel and are color (RGB) images -(3 values for each pixel). Hence, each image can be represented by 30,000 values -(100 X 100 X 3). Second, we selected a subset of 10 fruit/vegetable images. Training and -testing dataset sizes go from 7 GB and 2.5 GB for 131 fruits/vegetables to 500 MB and -177 MB for 10 fruits/vegetables, respectively. Third, we created separate files for feature -vectors and labels. Finally, we mapped the labels for the 10 selected fruits/vegetables to -a range of 0 to 9. Full dataset labels are in the 0 to 130 range, as the full dataset -includes 131 fruits/vegetables. The 10 labels for out dataset are as follows: Strawberry:0, -Apple_Red_Delicious:1, Pepper_Green:2, Corn:3, Banana:4, Tomato_1:5, Potato_White:6, Pineapple:7, -Orange:8, Peach:9. +The utilities used to create the subset dataset, along with step by step instructions, can +be found here: https://github.com/kxk302/fruit_dataset_utilities. First, we created feature +vectors for each image. 
Images are 100 pixel by 100 pixel and are color +(3 values for each pixel). Hence, each image can be represented by 30,000 values +(100 X 100 X 3). Second, we selected a subset of 10 fruit/vegetable images. Training and +testing dataset sizes go from 7 GB and 2.5 GB for 131 fruits/vegetables to 500 MB and +177 MB for 10 fruits/vegetables, respectively. Third, we created separate files for feature +vectors and labels. Finally, we mapped the labels for the 10 selected fruits/vegetables to +a range of 0 to 9. Full dataset labels are in the 0 to 130 range, as the full dataset +includes 131 fruits/vegetables. The 10 labels for our dataset are as follows: Strawberry:0, +Apple_Red_Delicious:1, Pepper_Green:2, Corn:3, Banana:4, Tomato_1:5, Potato_White:6, Pineapple:7, +Orange:8, Peach:9. ## Get data @@ -224,9 +233,9 @@ Orange:8, Peach:9. ## Classification of fruit 360 dataset images with CNN -In this section, we define a CNN and train it using fruit 360 dataset training data. The -goal is to learn a model such that given an image of a fruit/vegetable, we can predict -what fruit/vegetable it is (Labels are in the range of 0 to 9). We then evaluate the trained +In this section, we define a CNN and train it using the fruit 360 dataset training data. The +goal is to learn a model such that given an image of a fruit/vegetable, we can predict +what fruit/vegetable it is (Labels are in the range of 0 to 9). We then evaluate the trained CNN on the test dataset and plot the confusion matrix. In order to train the CNN, we must have the One-Hot Encoding (OHE) representation of the training @@ -321,18 +330,18 @@ its OHE representation. > - Click *"Run Tool"* {: .hands_on} -Each image is passed in as a vector of size 30,000 (100 x 100 X 3 = 30,000). The reshape -layer reshapes it into (100, 100, 3) dimensions -- 100 rows (image height), 100 columns -(image width), and 3 channels. Channel size is 3 since the image is color (RGB) and each -color pixel can be represented by 3 integers, representing the Red, Green, and Blue -primary colors. Our CNN then has 3 convolution + pooling layers. The first convolution layer +Each image is passed in as a vector of size 30,000 (100 x 100 X 3 = 30,000). The reshape +layer reshapes it into (100, 100, 3) dimensions -- 100 rows (image height), 100 columns +(image width), and 3 channels. Channel size is 3 since the image is color (RGB) and each +color pixel can be represented by 3 integers, representing the Red, Green, and Blue +primary colors. Our CNN then has 3 convolution + pooling layers. The first convolution layer has 16 filters (output channel size would be 16), and filter size is 5 x 5. The second convolutional -layer has 32 filters (output channel size would be 32), and filter size is 5 x 5. The third +layer has 32 filters (output channel size would be 32), and filter size is 5 x 5. The third convolutional layer has 64 filters (output channel size would be 64), and filter size is 5 x 5. All -3 pooling layers are MaxPool layers with pool size of 2 x 2. Afterwards, we flatten the previous layer's +3 pooling layers are MaxPool layers with pool size of 2 x 2. Afterwards, we flatten the previous layer's output (every row/column/channel would be an individual node), then add a fully connected layer with 256 -nodes and relu activation function. 
Finally, we add a fully connected layer with 10 nodes, and use +softmax activation function to get the probability of each fruit/vegetable. The fruit/vegetable with the +highest probability is predicted by the CNN. The model config can be downloaded as a JSON file. ### **Create a deep learning model** @@ -353,11 +362,11 @@ highest probability is predicted by CNN. The model config can be downloaded as a > - Click *"Run Tool"* {: .hands_on} -A loss function measures how different the predicted output is from the expected output. For multi-class classification problems, -we use *categorical cross entropy* as loss function. Epochs is the number of times the whole training data is used to train the -model. Setting *epochs* to 40 means each training example in our dataset is used 40 times to train our model. If we update network -weights/biases after all the training data is feed to the network, the training will be very slow (as we have 5014 training examples -in our dataset). To speed up the training, we present only a subset of the training examples to the network, after which we update +A loss function measures how different the predicted output is from the expected output. For multi-class classification problems, +we use *categorical cross entropy* as the loss function. Epochs is the number of times the whole training data is used to train the +model. Setting *epochs* to 40 means each training example in our dataset is used 40 times to train our model. If we update network +weights/biases after all the training data is fed to the network, the training will be very slow (as we have 5014 training examples +in our dataset). To speed up the training, we present only a subset of the training examples to the network, after which we update the weights/biases. *batch_size* decides the size of this subset. The model builder can be downloaded as a zip file. ### **Deep learning training and evaluation** @@ -377,7 +386,7 @@ the weights/biases. *batch_size* decides the size of this subset. The model buil > {: .hands_on} -The training step generates 3 datasets. 1) accuracy of the trained model, 2) the trained model, downloadable as a zip file, and +The training step generates 3 datasets. 1) accuracy of the trained model, 2) the trained model, downloadable as a zip file, and 3) the trained model weights, downloadable as an hdf5 file. These files are needed for prediction in the next step. ### **Model Prediction** @@ -395,7 +404,7 @@ The training step generates 3 datasets. 1) accuracy of the trained model, 2) the > {: .hands_on} -The prediction step generates 1 dataset. It's a file that has predictions (0 to 9 for the predicted fruit/vegetable) for every image +The prediction step generates 1 dataset. It's a file that has predictions (0 to 9 for the predicted fruit/vegetable) for every image in the test dataset. ### **Machine Learning Visualization Extension** @@ -414,14 +423,14 @@ in the test dataset. > {: .hands_on} -**Confusion Matrix** is a table that describes the performance of a classification model. It lists the number of examples that were -correctly classified by the model, True positives (TP) and true negatives (TN). It also lists the number of examples that were -classified as positive that were actually negative (False positive, FP, or Type I error), and the number of examples that were -classified as negative that were actually positive (False negative, FN, or Type 2 error). Given the confusion matrix, we can -calculate **precision** and **recall** {% cite TatbulEtAl %}. 
Precision is the fraction of predicted positives that are true -positives (Precision = TP / (TP + FP)). Recall is the fraction of true positives that are predicted (Recall = TP / (TP + FN)). -One way to describe the confusion matrix with just one value is to use the **F score**, which is the harmonic mean of precision -and recall +**Confusion Matrix** is a table that describes the performance of a classification model. It lists the number of examples that were +correctly classified by the model, True positives (TP) and true negatives (TN). It also lists the number of examples that were +classified as positive that were actually negative (False positive, FP, or Type I error), and the number of examples that were +classified as negative that were actually positive (False negative, FN, or Type II error). Given the confusion matrix, we can +calculate **precision** and **recall** {% cite TatbulEtAl %}. Precision is the fraction of predicted positives that are true +positives (Precision = TP / (TP + FP)). Recall is the fraction of true positives that are predicted (Recall = TP / (TP + FN)). +One way to describe the confusion matrix with just one value is to use the **F score**, which is the harmonic mean of precision +and recall. $$ Precision = \frac{\text{True positives}}{\text{True positives + False positives}} $$ @@ -431,20 +440,20 @@ $$ F score = \frac{2 * \text{Precision * Recall}}{\text{Precision + Recall}} $$ ![Confusion matrix for fruit 360 image classification problem](../../images/Fruit_confusion_matrix.png "Fruits/vegetables image classification confusion matrix") -Figure 1 is the resultant confusion matrix for our image classification problem. The first row in the table represents the *true* -fruit/vegetable with 0 as class label, which is strawberry (we have 164 strawberry images with 0 as class label). The second row -represents the *true* fruit/vegetable with 1 as class label, which is Apple_Red_Delicious (We have 166 Apple_Red_Delicious images -with 1 as class label). Similarly, you can count the true class labels for fruits/vegetables with class label of 2 to 9 by adding -up the numbers in the corresponding row. The first column from the left represents the *predicted* fruit/vegetable with 0 as class -label, which is strawberry (Our CNN predicted 164 images as being strawberry, and having class label 0). +Figure 10 is the resultant confusion matrix for our image classification problem. The first row in the table represents the *true* +fruit/vegetable with 0 as class label, which is strawberry (we have 164 strawberry images with 0 as class label). The second row +represents the *true* fruit/vegetable with 1 as class label, which is Apple_Red_Delicious (We have 166 Apple_Red_Delicious images +with 1 as class label). Similarly, you can count the true class labels for fruits/vegetables with class labels of 2 to 9 by adding +up the numbers in the corresponding row. The first column from the left represents the *predicted* fruit/vegetable with 0 as class +label, which is strawberry (Our CNN predicted 164 images as being strawberry, and having class label 0). 
The second column from +the left represents the *predicted* fruit/vegetable with 1 as class label, which is Apple_Red_Delicious (Our CNN predicted 166 +images as being Apple_Red_Delicious, and having class label 1). Similarly, you can count the predicted class labels for fruits/vegetables with class labels 2 to 9 by adding up the numbers in the corresponding column. -For label 3, which is corn, looking at the green cell in the 4th row and 4th column, we see that our CNN has correctly -predicted 118 images as being a corn image (True positives). Adding the numbers in the other rows in column 4, we see that -our CNN has incorrectly predicted 16 images as being corn (False positives). Adding the numbers on the 4th row besides the -True positives, we see that our CNN has incorrectly predicted 32 corn images as being label Potato_White (False negatives). +For label 3, which is corn, looking at the green cell in the 4th row and 4th column, we see that our CNN has correctly +predicted 118 images as being a corn image (True positives). Adding the numbers in the other rows in column 4, we see that +our CNN has incorrectly predicted 16 images as being corn (False positives). Adding the numbers on the 4th row besides the +True positives, we see that our CNN has incorrectly predicted 32 corn images as being label Potato_White (False negatives). Given these numbers, we can calculate Precision, Recall, and the F score for label 3 as follows: $$ Precision = \frac{\text{True positives}}{\text{True positives + False positives}} = \frac{118}{118 + 16} = 0.88 $$ diff --git a/topics/statistics/tutorials/intro-to-ml-with-r/tutorial.md b/topics/statistics/tutorials/intro-to-ml-with-r/tutorial.md index b16d793ecc6a61..7e19d39ea08f3e 100644 --- a/topics/statistics/tutorials/intro-to-ml-with-r/tutorial.md +++ b/topics/statistics/tutorials/intro-to-ml-with-r/tutorial.md @@ -44,6 +44,17 @@ key_points: contributors: - fpsom - gallantries + +recordings: +- captioners: + - MariaTsayo + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H30M + youtube_id: RT-g6KyAGdE + speakers: + - fpsom + --- This is an Introduction to Machine Learning in R, in which you'll learn the basics of unsupervised learning for pattern recognition and supervised learning for prediction. 
At the end of this workshop, we hope that you will: diff --git a/topics/statistics/tutorials/regression_machinelearning/tutorial.md b/topics/statistics/tutorials/regression_machinelearning/tutorial.md index d4346131ee07b7..7f5d6828327909 100755 --- a/topics/statistics/tutorials/regression_machinelearning/tutorial.md +++ b/topics/statistics/tutorials/regression_machinelearning/tutorial.md @@ -19,6 +19,17 @@ contributors: - khanteymoori - anuprulez - simonbray + +recordings: +- captioners: + - anuprulez + date: '2021-02-15' + galaxy_version: '21.01' + length: 1H29M + youtube_id: qxaWQjtEOzM + speakers: + - anuprulez + --- diff --git a/topics/synthetic-biology/tutorials/basic_assembly_analysis/tutorial.md b/topics/synthetic-biology/tutorials/basic_assembly_analysis/tutorial.md index 223562e89a9b13..d47f4e05b607ab 100644 --- a/topics/synthetic-biology/tutorials/basic_assembly_analysis/tutorial.md +++ b/topics/synthetic-biology/tutorials/basic_assembly_analysis/tutorial.md @@ -23,11 +23,20 @@ contributors: - ioanagry - jfaulon +recordings: +- captioners: + - kenza12 + date: '2022-05-11' + length: 3M + youtube_id: 0ItpHuSM7t0 + speakers: + - breakthewall + type: Demo --- -Synthetic biology is a novel engineering discipline which requires computational tools for the design of metabolic pathways for the production of chemicals such as SynBioCAD portal which is the first Galaxy set of tools for synthetic biology and metabolic engineering ({% cite Hrisson2022 %}). +Synthetic biology is a novel engineering discipline which requires computational tools for the design of metabolic pathways for the production of chemicals, such as the SynBioCAD portal, the first Galaxy set of tools for synthetic biology and metabolic engineering ({% cite Hrisson2022 %}). In this tutorial, we will use a set of tools from the **Genetic Design - BASIC Assembly Workflow** (https://galaxy-synbiocad.org) which will enable you to design plasmids implementing metabolic pathways for the bioproduction of lycopene in _E.coli_ (one of the preferred host cells for microbial biochemical production). @@ -211,7 +220,7 @@ In this section, you can run the Genetic Design - BASIC Assembly Workflow more e > > > > > -> > All the outputs will be automatically generated and identical to the previous ones. +> > All the outputs will be automatically generated and identical to the previous ones. > {: .comment} {: .hands_on} diff --git a/topics/synthetic-biology/tutorials/pathway_analysis/tutorial.md b/topics/synthetic-biology/tutorials/pathway_analysis/tutorial.md index 470002568577a8..ce7328dd4a719f 100644 --- a/topics/synthetic-biology/tutorials/pathway_analysis/tutorial.md +++ b/topics/synthetic-biology/tutorials/pathway_analysis/tutorial.md @@ -19,6 +19,16 @@ contributors: - ioanagry - jfaulon +recordings: +- captioners: + - kenza12 + date: '2022-09-26' + length: 5M + youtube_id: 240oxe8unnU + speakers: + - breakthewall + type: Demo + --- Progress in synthetic biology is enabled by powerful bioinformatics tools such as those aimed at designing metabolic pathways for the production of chemicals. These tools are available in the SynBioCAD portal, the first Galaxy set of tools for synthetic biology and metabolic engineering ({% cite Hrisson2022 %}). 
@@ -88,9 +98,9 @@ First we need to upload and prepare the following inputs to analyze: # Compute the target product flux -Notice that the starting compounds (in other words, _the precursors_) of the predicted pathways (also referred as the _heterologous pathways_) are compounds that have been initially extracted from the genome-scale metabolic model (GEM) of the organism we are interested in (also referred as _chassis_). While this step is out of the scope of the present Pathway Analysis tutorial, this means that the precursors of predicted pathways are also present in the chassis model. Hence, predicted pathways and the chassis organism model can be merged to construct "augmented" whole-cell models, enabling flux analysis of these metabolic systems. This is what we'll do here to predict the production flux of a compound of interest. +Notice that the starting compounds (in other words, _the precursors_) of the predicted pathways (also referred to as the _heterologous pathways_) are compounds that have been initially extracted from the genome-scale metabolic model (GEM) of the organism we are interested in (also referred to as the _chassis_). While this step is outside the scope of the present Pathway Analysis tutorial, this means that the precursors of predicted pathways are also present in the chassis model. Hence, predicted pathways and the chassis organism model can be merged to construct "augmented" whole-cell models, enabling flux analysis of these metabolic systems. This is what we'll do here to predict the production flux of a compound of interest. -Within the frame of this tutorial, we'll use the _E. coli_ iML1515 GEM (downloaded from the [BiGG database](http://bigg.ucsd.edu/)) to model the chassis metabolism of _E. coli_ and the target compound is the lycopene. The provided _E. coli_ model is in the SBML. The extraction of precursor compounds and the pathway prediction have already been performed during the RetroSynthesis workflow (available in [Galaxy SynbioCAD platform](https://galaxy-synbiocad.org)). +Within the frame of this tutorial, we'll use the _E. coli_ iML1515 GEM (downloaded from the [BiGG database](http://bigg.ucsd.edu/)) to model the chassis metabolism of _E. coli_, and the target compound is lycopene. The provided _E. coli_ model is in SBML format. The extraction of precursor compounds and the pathway prediction have already been performed during the RetroSynthesis workflow (available in the [Galaxy SynbioCAD platform](https://galaxy-synbiocad.org)). The FBA (Flux Balance Analysis) method used to calculate the flux is a mathematical approach (as described in the Methods section in {% cite Hrisson2022 %}) which uses the COBRApy package ({% cite Ebrahim2013 %}) and proposes 3 different analysis methods (standard FBA, parsimonious FBA, fraction of reaction). The first two methods are specific to the COBRApy package and the last one `Fraction of Reaction` is an in-house analysis method (as described in the Methods section in {% cite Hrisson2022 %}) to consider the cell's needs for its own maintenance while producing the target compound. @@ -255,7 +265,7 @@ In this section, you can run the Pathway Analysis Workflow more easily and fastl > > -> > All the outputs will be automatically generated and identical to the previous ones. +> > All the outputs will be automatically generated and identical to the previous ones. 
> {: .comment} {: .hands_on} diff --git a/topics/synthetic-biology/tutorials/retrosynthesis_analysis/tutorial.md b/topics/synthetic-biology/tutorials/retrosynthesis_analysis/tutorial.md index 23a791bee318d3..f4a9fa73c64c69 100644 --- a/topics/synthetic-biology/tutorials/retrosynthesis_analysis/tutorial.md +++ b/topics/synthetic-biology/tutorials/retrosynthesis_analysis/tutorial.md @@ -18,6 +18,17 @@ contributors: - breakthewall - ioanagry - jfaulon + +recordings: +- captioners: + - kenza12 + date: '2022-06-23' + length: 5M + youtube_id: nnE3Ip94D8I + speakers: + - breakthewall + type: Demo + --- @@ -60,8 +71,8 @@ RetroSynthesis workflow will be run with the following inputs: 2. The structure of metabolites present in the chosen chassis (E. coli), 3. Reaction rules (generated by RRules Parser node that calls RetroRules). -The data used are pretty straight forward to obtain. -Firstly, we download an SBML model, then we select all sinks to use into the RetroPath2.0 software from this model. +The data used are pretty straightforward to obtain. +Firstly, we download an SBML model, then we select all sinks to use with the RetroPath2.0 software from this model. Lastly, we request from RetroRules all possible reactions to find a chemical reaction cascade that produces the target. ## Download a model @@ -266,7 +277,7 @@ In this section, you can run the RetroSynthesis Workflow more easily and fastly > > > > > -> > All the outputs will be automatically generated and identical to the previous ones. +> > All the outputs will be automatically generated and identical to the previous ones. > {: .comment} {: .hands_on} diff --git a/topics/transcriptomics/tutorials/de-novo/tutorial.md b/topics/transcriptomics/tutorials/de-novo/tutorial.md index fd8cc9098a4622..68b2e242a4c9e7 100755 --- a/topics/transcriptomics/tutorials/de-novo/tutorial.md +++ b/topics/transcriptomics/tutorials/de-novo/tutorial.md @@ -24,6 +24,17 @@ contributions: - moheydarian editing: - jxtx + +recordings: +- captioners: + - malloryfreeberg + date: '2021-02-15' + galaxy_version: '21.01' + length: 48M + youtube_id: WrOIpCxba78 + speakers: + - malloryfreeberg + --- diff --git a/topics/transcriptomics/tutorials/introduction/slides.html b/topics/transcriptomics/tutorials/introduction/slides.html index 0244da28dc8205..5360004938ce49 100644 --- a/topics/transcriptomics/tutorials/introduction/slides.html +++ b/topics/transcriptomics/tutorials/introduction/slides.html @@ -10,6 +10,18 @@ - bebatut - erxleben - mwolfien + +recordings: +- captioners: + - nagoue + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 30M + youtube_id: qKkS_tztw_Q + speakers: + - fpsom + --- # What is RNA sequencing? 
diff --git a/topics/transcriptomics/tutorials/mirna-target-finder/slides.html b/topics/transcriptomics/tutorials/mirna-target-finder/slides.html index 5c2cc19fd9d85f..0bd9becbac4559 100644 --- a/topics/transcriptomics/tutorials/mirna-target-finder/slides.html +++ b/topics/transcriptomics/tutorials/mirna-target-finder/slides.html @@ -12,6 +12,25 @@ video: yes zenodo_link: "" + +recordings: +- captioners: + - gallardoalba + date: '2021-04-20' + galaxy_version: '21.01' + length: 10M + youtube_id: FNmZoo2OXTU + speakers: + - awspolly +- captioners: + - gallardoalba + date: '2021-02-15' + galaxy_version: '21.01' + length: 10M + youtube_id: 7t0Gr1Jklqs + speakers: + - awspolly + --- # Introduction diff --git a/topics/transcriptomics/tutorials/mirna-target-finder/tutorial.md b/topics/transcriptomics/tutorials/mirna-target-finder/tutorial.md index 0ad793d7f889d6..d9b3bd70b7e9a5 100644 --- a/topics/transcriptomics/tutorials/mirna-target-finder/tutorial.md +++ b/topics/transcriptomics/tutorials/mirna-target-finder/tutorial.md @@ -24,6 +24,17 @@ contributors: - pavanvidem - beatrizserrano + +recordings: +- captioners: + - gallardoalba + date: '2021-07-25' + galaxy_version: '21.05' + length: 1H10M + youtube_id: wS1X_0Mgw7M + speakers: + - gallardoalba + --- diff --git a/topics/transcriptomics/tutorials/ref-based/tutorial.md b/topics/transcriptomics/tutorials/ref-based/tutorial.md index 3ab8d1ce716b8a..f98a7fc2b43d29 100644 --- a/topics/transcriptomics/tutorials/ref-based/tutorial.md +++ b/topics/transcriptomics/tutorials/ref-based/tutorial.md @@ -57,6 +57,26 @@ contributions: - lldelisle editing: - hexylena + +recordings: +- youtube_id: AeiW3IItO_c + speakers: + - lldelisle + captioners: + - lldelisle + date: '2023-05-15' + galaxy_version: '23.01' + length: 2H50M +- captioners: + - hexylena + - shiltemann + date: '2021-02-15' + galaxy_version: '21.01' + length: 2H30M + youtube_id: j4onRSN650A + speakers: + - bebatut + --- diff --git a/topics/transcriptomics/tutorials/rna-seq-counts-to-viz-in-r/tutorial.md b/topics/transcriptomics/tutorials/rna-seq-counts-to-viz-in-r/tutorial.md index ca489cd6f9f444..a4edc1c4f9fecf 100644 --- a/topics/transcriptomics/tutorials/rna-seq-counts-to-viz-in-r/tutorial.md +++ b/topics/transcriptomics/tutorials/rna-seq-counts-to-viz-in-r/tutorial.md @@ -38,6 +38,19 @@ contributors: - fpsom - tobyhodges - gallantries + +recordings: +- captioners: + - hexylena + - shiltemann + - fpsom + date: '2021-02-15' + galaxy_version: '21.01' + length: 30M + youtube_id: rJWgnLgA2BE + speakers: + - fpsom + --- This tutorial will show you how to visualise RNA Sequencing Counts with R diff --git a/topics/transcriptomics/tutorials/rna-seq-viz-with-volcanoplot-r/tutorial.md b/topics/transcriptomics/tutorials/rna-seq-viz-with-volcanoplot-r/tutorial.md index d1c9f0d223eb0a..a309985a84cd94 100644 --- a/topics/transcriptomics/tutorials/rna-seq-viz-with-volcanoplot-r/tutorial.md +++ b/topics/transcriptomics/tutorials/rna-seq-viz-with-volcanoplot-r/tutorial.md @@ -34,6 +34,17 @@ requirements: tutorials: - rna-seq-viz-with-volcanoplot - rna-seq-counts-to-viz-in-r + +recordings: +- captioners: + - mblue9 + date: '2021-02-15' + galaxy_version: '21.01' + length: 15M + youtube_id: 4dspgiwkuxk + speakers: + - mblue9 + --- diff --git a/topics/transcriptomics/tutorials/rna-seq-viz-with-volcanoplot/tutorial.md b/topics/transcriptomics/tutorials/rna-seq-viz-with-volcanoplot/tutorial.md index 21384918903626..814a96c21e27fe 100644 --- a/topics/transcriptomics/tutorials/rna-seq-viz-with-volcanoplot/tutorial.md +++ 
b/topics/transcriptomics/tutorials/rna-seq-viz-with-volcanoplot/tutorial.md @@ -25,6 +25,17 @@ follow_up_training: topic_name: transcriptomics tutorials: - rna-seq-viz-with-volcanoplot-r + +recordings: +- captioners: + - mblue9 + date: '2021-02-15' + galaxy_version: '21.01' + length: 10M + youtube_id: uNGXIcEGZwA + speakers: + - mblue9 + --- ![Volcano plot highlighting significant genes](../../images/rna-seq-viz-with-volcanoplot/volcanoplot.png){: style="float:right;width:60%" } diff --git a/topics/variant-analysis/tutorials/sars-cov-2-variant-discovery/tutorial.md b/topics/variant-analysis/tutorials/sars-cov-2-variant-discovery/tutorial.md index b171a1eab6581b..62e8aa9d7bc89d 100644 --- a/topics/variant-analysis/tutorials/sars-cov-2-variant-discovery/tutorial.md +++ b/topics/variant-analysis/tutorials/sars-cov-2-variant-discovery/tutorial.md @@ -45,6 +45,23 @@ tags: - covid19 - virology - one-health + +recordings: +- youtube_id: hjlmCWQhBvI + date: '2023-05-10' + speakers: + - wm75 + captioners: + - wm75 + length: 55M +- captioners: + - hexylena + date: '2021-08-09' + length: 1H30M + youtube_id: vnFQ2fR_fzw + speakers: + - wm75 + --- diff --git a/topics/variant-analysis/tutorials/sars-cov-2/tutorial.md b/topics/variant-analysis/tutorials/sars-cov-2/tutorial.md index 35e749c4a5637b..a60ca1fa5f47a6 100644 --- a/topics/variant-analysis/tutorials/sars-cov-2/tutorial.md +++ b/topics/variant-analysis/tutorials/sars-cov-2/tutorial.md @@ -30,6 +30,16 @@ contributions: editing: - wm75 +recordings: +- captioners: + - nekrut + date: '2021-02-15' + galaxy_version: '21.01' + length: 15M + youtube_id: 9mIL0tIfZ_o + speakers: + - nekrut + --- diff --git a/topics/variant-analysis/tutorials/tb-variant-analysis/tutorial.md b/topics/variant-analysis/tutorials/tb-variant-analysis/tutorial.md index dcc7b86a1eec7e..8cbd4fe8316818 100644 --- a/topics/variant-analysis/tutorials/tb-variant-analysis/tutorial.md +++ b/topics/variant-analysis/tutorials/tb-variant-analysis/tutorial.md @@ -33,6 +33,27 @@ edam_ontology: - topic_0199 # Genetic variation - topic_3305 # Public health and epidemiology - topic_3324 # Infectious disease + +recordings: +- captioners: + - pvanheus + - nagoue + - hexylena + date: '2021-02-15' + galaxy_version: '21.01' + length: 40M + youtube_id: G1DmquX6Wh8 + speakers: + - pvanheus +- captioners: + - pvanheus + date: '2024-06-11' + galaxy_version: '24.0' + length: 1H17M + youtube_id: "-nJPngFk36c" + speakers: + - pvanheus + --- @@ -286,14 +307,14 @@ We still cannot entirely trust the proposed variants. In particular, there are r {: .hands_on} > Which filters to apply? -> +> > TB Variant Filter tries to provide reasonable defaults for filtering > variants predicted in the M. tuberculosis > genome, using multiple different strategies. > Firstly, certain regions of the Mtb genome > contain repetitive sequences, e.g. from > the PE/PPE gene family. Historically all of the genomic regions corresponding to -> those genes were filtered out but +> those genes were filtered out but > the new default draws on work from > Maximillian Marin and others. This > list of "refined low confidence" (RLC) @@ -360,7 +381,7 @@ We will, however, spend some time examining this data in more detail in the next We could go through all of the variants in the VCF files and read them out of a text table, but this is onerous and doesn't really give the context of the changes very well. It would be much nicer to have a visualisation of the SNPs and the other relevant data. 
A genome viewer, such as JBrowse, can be used within Galaxy to display the _M. tuberculosis_ genome and the data from our analysis.

> Run JBrowse
->
+>
>
> 1. Use {% tool [seqret](toolshed.g2.bx.psu.edu/repos/devteam/emboss_5/EMBOSS:%20seqret84/5.0.0) %} to convert the Genbank format reference (`Mycobacterium_tuberculosis_ancestral_reference.gbk`) to FASTA format. Use the following parameters:
>    - *"Sequences"*: `Mycobacterium_tuberculosis_ancestral_reference.gbk`
diff --git a/topics/variant-analysis/tutorials/trio-analysis/tutorial.md b/topics/variant-analysis/tutorials/trio-analysis/tutorial.md
index f4fed2991fc7ea..bda92151972e30 100644
--- a/topics/variant-analysis/tutorials/trio-analysis/tutorial.md
+++ b/topics/variant-analysis/tutorials/trio-analysis/tutorial.md
@@ -26,6 +26,16 @@ contributors:

tags:
- cyoa
+
+recordings:
+- youtube_id: 3OWXWOkJRqU
+  speakers:
+  - JasperO98
+  captioners:
+  - JasperO98
+  date: '2023-05-12'
+  length: 40M
+
 ---

To discover causal mutations of inherited diseases it’s common practice to do a trio analysis. In a trio analysis, DNA of both the patient and the parents is sequenced. Using this method, it’s possible to identify multiple inheritance patterns. Some examples of these patterns are autosomal recessive, autosomal dominant, and de-novo variants, which are represented in the figure below. To elaborate, the leftmost tree shows an autosomal dominant inheritance pattern, where the offspring inherits a faulty copy of the gene from one of the parents. The center subfigure represents an autosomal recessive disease, where the offspring inherited a faulty copy of the same gene from both parents. In the right subfigure a de-novo mutation is shown, which arises during the offspring’s lifetime.
diff --git a/topics/visualisation/tutorials/circos/slides.html b/topics/visualisation/tutorials/circos/slides.html
index d3235d080bbb7a..7b1456b8c9c8d6 100644
--- a/topics/visualisation/tutorials/circos/slides.html
+++ b/topics/visualisation/tutorials/circos/slides.html
@@ -16,6 +16,18 @@
 - hexylena
 - shiltemann
 - gallantries
+
+recordings:
+- captioners:
+  - assuntad23
+  - shiltemann
+  date: '2021-02-15'
+  galaxy_version: '21.01'
+  length: 6M
+  youtube_id: KCtI5KLp05k
+  speakers:
+  - hexylena
+
 ---

# Circos for Genomics Visualisation
diff --git a/topics/visualisation/tutorials/circos/tutorial.md b/topics/visualisation/tutorials/circos/tutorial.md
index 72d52cad39a60d..ca513705818e25 100644
--- a/topics/visualisation/tutorials/circos/tutorial.md
+++ b/topics/visualisation/tutorials/circos/tutorial.md
@@ -17,6 +17,20 @@ contributors:
 - shiltemann
 - hexylena
 - gallardoalba
+
+recordings:
+- captioners:
+  - beatrizserrano
+  - hexylena
+  - shiltemann
+  - khanteymoori
+  date: '2021-02-15'
+  galaxy_version: '21.01'
+  length: 50M
+  youtube_id: tbqeUP67w-Y
+  speakers:
+  - hexylena
+
 ---
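One last note on the metadata itself: the `length` values used in these recordings entries ('6M', '55M', '1H10M', '2H50M', ...) follow a compact hours/minutes pattern. Below is a minimal sketch for normalizing them to minutes, with the pattern inferred only from the values visible in this diff.

```python
# Minimal sketch: convert GTN recording lengths such as '6M' or '1H10M'
# to minutes. The H/M pattern is inferred from this diff only.
import re

def length_to_minutes(length: str) -> int:
    match = re.fullmatch(r"(?:(\d+)H)?(\d+)M", length.strip().upper())
    if not match:
        raise ValueError(f"unrecognized length: {length!r}")
    hours = int(match.group(1) or 0)
    return hours * 60 + int(match.group(2))

assert length_to_minutes("6M") == 6
assert length_to_minutes("1H10M") == 70
assert length_to_minutes("2H50M") == 170
```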