Add support for the new channel layout - part 1 (#3374)

This commit is contained in:
Samantaz Fox 2022-11-02 22:05:48 +01:00
commit 09942dee66
No known key found for this signature in database
GPG key ID: F42821059186176E
3 changed files with 162 additions and 74 deletions

View File

@@ -5,13 +5,13 @@ CONFIG = Config.from_yaml(File.open("config/config.example.yml"))
 Spectator.describe "Helper" do
   describe "#produce_channel_videos_url" do
     it "correctly produces url for requesting page `x` of a channel's videos" do
-      expect(produce_channel_videos_url(ucid: "UCXuqSBlHAE6Xw-yeJA0Tunw")).to eq("/browse_ajax?continuation=4qmFsgI8EhhVQ1h1cVNCbEhBRTZYdy15ZUpBMFR1bncaIEVnWjJhV1JsYjNNd0FqZ0JZQUZxQUxnQkFDQUFlZ0V4&gl=US&hl=en")
-
-      expect(produce_channel_videos_url(ucid: "UCXuqSBlHAE6Xw-yeJA0Tunw", sort_by: "popular")).to eq("/browse_ajax?continuation=4qmFsgJAEhhVQ1h1cVNCbEhBRTZYdy15ZUpBMFR1bncaJEVnWjJhV1JsYjNNd0FqZ0JZQUZxQUxnQkFDQUFlZ0V4R0FFPQ%3D%3D&gl=US&hl=en")
-
-      expect(produce_channel_videos_url(ucid: "UCXuqSBlHAE6Xw-yeJA0Tunw", page: 20)).to eq("/browse_ajax?continuation=4qmFsgJAEhhVQ1h1cVNCbEhBRTZYdy15ZUpBMFR1bncaJEVnWjJhV1JsYjNNd0FqZ0JZQUZxQUxnQkFDQUFlZ0l5TUE9PQ%3D%3D&gl=US&hl=en")
-
-      expect(produce_channel_videos_url(ucid: "UC-9-kyTW8ZkZNDHQJ6FgpwQ", page: 20, sort_by: "popular")).to eq("/browse_ajax?continuation=4qmFsgJAEhhVQy05LWt5VFc4WmtaTkRIUUo2Rmdwd1EaJEVnWjJhV1JsYjNNd0FqZ0JZQUZxQUxnQkFDQUFlZ0l5TUJnQg%3D%3D&gl=US&hl=en")
+      # expect(produce_channel_videos_url(ucid: "UCXuqSBlHAE6Xw-yeJA0Tunw")).to eq("/browse_ajax?continuation=4qmFsgI8EhhVQ1h1cVNCbEhBRTZYdy15ZUpBMFR1bncaIEVnWjJhV1JsYjNNd0FqZ0JZQUZxQUxnQkFDQUFlZ0V4&gl=US&hl=en")
+      #
+      # expect(produce_channel_videos_url(ucid: "UCXuqSBlHAE6Xw-yeJA0Tunw", sort_by: "popular")).to eq("/browse_ajax?continuation=4qmFsgJAEhhVQ1h1cVNCbEhBRTZYdy15ZUpBMFR1bncaJEVnWjJhV1JsYjNNd0FqZ0JZQUZxQUxnQkFDQUFlZ0V4R0FFPQ%3D%3D&gl=US&hl=en")
+
+      # expect(produce_channel_videos_url(ucid: "UCXuqSBlHAE6Xw-yeJA0Tunw", page: 20)).to eq("/browse_ajax?continuation=4qmFsgJAEhhVQ1h1cVNCbEhBRTZYdy15ZUpBMFR1bncaJEVnWjJhV1JsYjNNd0FqZ0JZQUZxQUxnQkFDQUFlZ0l5TUE9PQ%3D%3D&gl=US&hl=en")
+
+      # expect(produce_channel_videos_url(ucid: "UC-9-kyTW8ZkZNDHQJ6FgpwQ", page: 20, sort_by: "popular")).to eq("/browse_ajax?continuation=4qmFsgJAEhhVQy05LWt5VFc4WmtaTkRIUUo2Rmdwd1EaJEVnWjJhV1JsYjNNd0FqZ0JZQUZxQUxnQkFDQUFlZ0l5TUJnQg%3D%3D&gl=US&hl=en")
     end
   end

View File

@@ -1,53 +1,48 @@
 def produce_channel_videos_continuation(ucid, page = 1, auto_generated = nil, sort_by = "newest", v2 = false)
-  object = {
-    "80226972:embedded" => {
-      "2:string" => ucid,
-      "3:base64" => {
-        "2:string"  => "videos",
-        "6:varint"  => 2_i64,
-        "7:varint"  => 1_i64,
-        "12:varint" => 1_i64,
-        "13:string" => "",
-        "23:varint" => 0_i64,
-      },
-    },
-  }
-
-  if !v2
-    if auto_generated
-      seed = Time.unix(1525757349)
-      until seed >= Time.utc
-        seed += 1.month
-      end
-      timestamp = seed - (page - 1).months
-
-      object["80226972:embedded"]["3:base64"].as(Hash)["4:varint"] = 0x36_i64
-      object["80226972:embedded"]["3:base64"].as(Hash)["15:string"] = "#{timestamp.to_unix}"
-    else
-      object["80226972:embedded"]["3:base64"].as(Hash)["4:varint"] = 0_i64
-      object["80226972:embedded"]["3:base64"].as(Hash)["15:string"] = "#{page}"
-    end
-  else
-    object["80226972:embedded"]["3:base64"].as(Hash)["4:varint"] = 0_i64
-    object["80226972:embedded"]["3:base64"].as(Hash)["61:string"] = Base64.urlsafe_encode(Protodec::Any.from_json(Protodec::Any.cast_json({
-      "1:string" => Base64.urlsafe_encode(Protodec::Any.from_json(Protodec::Any.cast_json({
-        "1:varint" => 30_i64 * (page - 1),
-      }))),
-    })))
-  end
-
-  case sort_by
-  when "newest"
-  when "popular"
-    object["80226972:embedded"]["3:base64"].as(Hash)["3:varint"] = 0x01_i64
-  when "oldest"
-    object["80226972:embedded"]["3:base64"].as(Hash)["3:varint"] = 0x02_i64
-  else nil # Ignore
-  end
-
-  object["80226972:embedded"]["3:string"] = Base64.urlsafe_encode(Protodec::Any.from_json(Protodec::Any.cast_json(object["80226972:embedded"]["3:base64"])))
-  object["80226972:embedded"].delete("3:base64")
+  object_inner_2 = {
+    "2:0:embedded" => {
+      "1:0:varint" => 0_i64,
+    },
+    "5:varint"  => 50_i64,
+    "6:varint"  => 1_i64,
+    "7:varint"  => (page * 30).to_i64,
+    "9:varint"  => 1_i64,
+    "10:varint" => 0_i64,
+  }
+
+  object_inner_2_encoded = object_inner_2
+    .try { |i| Protodec::Any.cast_json(i) }
+    .try { |i| Protodec::Any.from_json(i) }
+    .try { |i| Base64.urlsafe_encode(i) }
+    .try { |i| URI.encode_www_form(i) }
+
+  object_inner_1 = {
+    "110:embedded" => {
+      "3:embedded" => {
+        "15:embedded" => {
+          "1:embedded" => {
+            "1:string" => object_inner_2_encoded,
+            "2:string" => "00000000-0000-0000-0000-000000000000",
+          },
+          "3:varint" => 1_i64,
+        },
+      },
+    },
+  }
+
+  object_inner_1_encoded = object_inner_1
+    .try { |i| Protodec::Any.cast_json(i) }
+    .try { |i| Protodec::Any.from_json(i) }
+    .try { |i| Base64.urlsafe_encode(i) }
+    .try { |i| URI.encode_www_form(i) }
+
+  object = {
+    "80226972:embedded" => {
+      "2:string"  => ucid,
+      "3:string"  => object_inner_1_encoded,
+      "35:string" => "browse-feed#{ucid}videos102",
+    },
+  }
 
   continuation = object.try { |i| Protodec::Any.cast_json(i) }
     .try { |i| Protodec::Any.from_json(i) }
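Note: the encoding chain that now appears twice (for object_inner_2 and object_inner_1) compiles a typed-JSON hash into protobuf bytes via Protodec, base64url-encodes it, then percent-encodes it so the result can be nested as a plain string field of the enclosing message. A minimal sketch of that chain as one step, assuming the protodec shard used by Invidious; the helper name is hypothetical:

require "uri"
require "base64"
require "protodec/utils"

# Hypothetical helper: typed-JSON hash -> protobuf bytes ->
# urlsafe base64 -> percent-encoding, ready for embedding as a
# string field of the enclosing protobuf object.
def encode_inner_object(object) : String
  object
    .try { |i| Protodec::Any.cast_json(i) }
    .try { |i| Protodec::Any.from_json(i) }
    .try { |i| Base64.urlsafe_encode(i) }
    .try { |i| URI.encode_www_form(i) }
end

With such a helper, the new function body reduces to building three hashes and encoding the two inner ones.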
@@ -67,10 +62,11 @@ end
 def get_60_videos(ucid, author, page, auto_generated, sort_by = "newest")
   videos = [] of SearchVideo
 
-  2.times do |i|
-    initial_data = get_channel_videos_response(ucid, page * 2 + (i - 1), auto_generated: auto_generated, sort_by: sort_by)
-    videos.concat extract_videos(initial_data, author, ucid)
-  end
+  # 2.times do |i|
+  #  initial_data = get_channel_videos_response(ucid, page * 2 + (i - 1), auto_generated: auto_generated, sort_by: sort_by)
+  initial_data = get_channel_videos_response(ucid, 1, auto_generated: auto_generated, sort_by: sort_by)
+  videos = extract_videos(initial_data, author, ucid)
+  # end
 
   return videos.size, videos
 end
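With the loop commented out, get_60_videos now issues a single get_channel_videos_response call and returns just one batch of videos; the page argument is effectively ignored for now. A hedged call-site sketch, using the names from the code above:

# Fetches a single batch under the new layout; pagination is
# presumably meant to return in a later part of this work.
count, videos = get_60_videos(ucid, author, page, auto_generated)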

View File

@@ -17,6 +17,7 @@ private ITEM_PARSERS = {
   Parsers::PlaylistRendererParser,
   Parsers::CategoryRendererParser,
   Parsers::RichItemRendererParser,
+  Parsers::ReelItemRendererParser,
 }
 
 record AuthorFallback, name : String, id : String
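For context on why appending one entry is enough: items are dispatched through this tuple, and each parser's process method returns nil unless the item carries its renderer key. A sketch of that dispatch, assuming the loop shape used elsewhere in this file:

# Each parser only claims items containing its own key (here,
# "reelItemRenderer"), returning nil otherwise, so ordering is
# not significant for disjoint renderer types.
ITEM_PARSERS.each do |parser|
  if result = parser.process(item, author_fallback)
    return result
  end
end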
@@ -369,7 +370,7 @@ private module Parsers
   end
 
   # Parses an InnerTube richItemRenderer into a SearchVideo.
-  # Returns nil when the given object isn't a shelfRenderer
+  # Returns nil when the given object isn't a RichItemRenderer
   #
   # A richItemRenderer seems to be a simple wrapper for a videoRenderer, used
   # by the result page for hashtags. It is located inside a continuationItems
@@ -390,6 +391,90 @@ private module Parsers
       return {{@type.name}}
     end
   end
+
+  # Parses an InnerTube reelItemRenderer into a SearchVideo.
+  # Returns nil when the given object isn't a reelItemRenderer
+  #
+  # reelItemRenderer items are used in the new (2022) channel layout,
+  # in the "shorts" tab.
+  #
+  module ReelItemRendererParser
+    def self.process(item : JSON::Any, author_fallback : AuthorFallback)
+      if item_contents = item["reelItemRenderer"]?
+        return self.parse(item_contents, author_fallback)
+      end
+    end
+
+    private def self.parse(item_contents, author_fallback)
+      video_id = item_contents["videoId"].as_s
+
+      video_details_container = item_contents.dig(
+        "navigationEndpoint", "reelWatchEndpoint",
+        "overlay", "reelPlayerOverlayRenderer",
+        "reelPlayerHeaderSupportedRenderers",
+        "reelPlayerHeaderRenderer"
+      )
+
+      # Author infos
+
+      author = video_details_container
+        .dig?("channelTitleText", "runs", 0, "text")
+        .try &.as_s || author_fallback.name
+
+      ucid = video_details_container
+        .dig?("channelNavigationEndpoint", "browseEndpoint", "browseId")
+        .try &.as_s || author_fallback.id
+
+      # Title & publication date
+
+      title = video_details_container.dig?("reelTitleText")
+        .try { |t| extract_text(t) } || ""
+
+      published = video_details_container
+        .dig?("timestampText", "simpleText")
+        .try { |t| decode_date(t.as_s) } || Time.utc
+
+      # View count
+
+      view_count_text = video_details_container.dig?("viewCountText", "simpleText")
+      view_count_text ||= video_details_container
+        .dig?("viewCountText", "accessibility", "accessibilityData", "label")
+
+      view_count = view_count_text.try &.as_s.gsub(/\D+/, "").to_i64? || 0_i64
+
+      # Duration
+
+      a11y_data = item_contents
+        .dig?("accessibility", "accessibilityData", "label")
+        .try &.as_s || ""
+
+      regex_match = /- (?<min>\d+ minutes? )?(?<sec>\d+ seconds?)+ -/.match(a11y_data)
+
+      minutes = regex_match.try &.["min"]?.try &.to_i(strict: false) || 0
+      seconds = regex_match.try &.["sec"]?.try &.to_i(strict: false) || 0
+
+      duration = (minutes*60 + seconds)
+
+      SearchVideo.new({
+        title:              title,
+        id:                 video_id,
+        author:             author,
+        ucid:               ucid,
+        published:          published,
+        views:              view_count,
+        description_html:   "",
+        length_seconds:     duration,
+        live_now:           false,
+        premium:            false,
+        premiere_timestamp: Time.unix(0),
+        author_verified:    false,
+      })
+    end
+
+    def self.parser_name
+      return {{@type.name}}
+    end
+  end
 end
 
 # The following are the extractors for extracting an array of items from
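The duration of a short is not exposed as a dedicated field, so the parser above recovers it from the item's accessibility label. A worked example of the regex, with an invented label shaped like YouTube's accessibility strings:

a11y_label = "My short - 1 minute 15 seconds - play video" # invented sample

if md = /- (?<min>\d+ minutes? )?(?<sec>\d+ seconds?)+ -/.match(a11y_label)
  minutes = md["min"]?.try &.to_i(strict: false) || 0 # => 1  ("1 minute ")
  seconds = md["sec"]?.try &.to_i(strict: false) || 0 # => 15 ("15 seconds")
  puts minutes * 60 + seconds                         # => 75 (length_seconds)
end

to_i(strict: false) stops at the first non-digit, which is why the unit words can remain in the captured text.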
@@ -436,21 +521,31 @@ private module Extractors
       content = extract_selected_tab(target["tabs"])["content"]
 
       if section_list_contents = content.dig?("sectionListRenderer", "contents")
-        section_list_contents.as_a.each do |renderer_container|
-          renderer_container_contents = renderer_container["itemSectionRenderer"]["contents"][0]
-
-          # Category extraction
-          if items_container = renderer_container_contents["shelfRenderer"]?
-            raw_items << renderer_container_contents
-            next
-          elsif items_container = renderer_container_contents["gridRenderer"]?
-          else
-            items_container = renderer_container_contents
-          end
-
-          items_container["items"]?.try &.as_a.each do |item|
-            raw_items << item
+        raw_items = unpack_section_list(section_list_contents)
+      elsif rich_grid_contents = content.dig?("richGridRenderer", "contents")
+        raw_items = rich_grid_contents.as_a
+      end
+
+      return raw_items
+    end
+
+    private def self.unpack_section_list(contents)
+      raw_items = [] of JSON::Any
+
+      contents.as_a.each do |renderer_container|
+        renderer_container_contents = renderer_container["itemSectionRenderer"]["contents"][0]
+
+        # Category extraction
+        if items_container = renderer_container_contents["shelfRenderer"]?
+          raw_items << renderer_container_contents
+          next
+        elsif items_container = renderer_container_contents["gridRenderer"]?
+        else
+          items_container = renderer_container_contents
+        end
+
+        items_container["items"]?.try &.as_a.each do |item|
+          raw_items << item
         end
       end
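The extract method above now branches on whichever container is present, and the old sectionListRenderer walk moves unchanged into unpack_section_list. A trimmed illustration of the two shapes, with stand-in payloads (real responses carry many more fields):

require "json"

old_layout = JSON.parse %({"sectionListRenderer": {"contents": []}}) # pre-2022 channel tab
new_layout = JSON.parse %({"richGridRenderer": {"contents": []}})    # new (2022) channel tab

old_layout.dig?("sectionListRenderer", "contents") # => items wrapped in itemSectionRenderer
new_layout.dig?("richGridRenderer", "contents")    # => richItemRenderer / reelItemRenderer items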
@@ -525,14 +620,11 @@ private module Extractors
     end
 
     private def self.extract(target)
-      raw_items = [] of JSON::Any
+      content = target["continuationItems"]?
+      content ||= target.dig?("gridContinuation", "items")
+      content ||= target.dig?("richGridContinuation", "contents")
 
-      if content = target["gridContinuation"]?
-        raw_items = content["items"].as_a
-      elsif content = target["continuationItems"]?
-        raw_items = content.as_a
-      end
-
-      return raw_items
+      return content.nil? ? [] of JSON::Any : content.as_a
     end
 
     def self.extractor_name
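The continuation extractor likewise learns the new richGridContinuation container: the ||= chain keeps the first key that digs to a non-nil value, and the ternary preserves the old empty-array fallback. A small stand-in demonstration:

require "json"

# Stand-in payload containing only the new richGridContinuation form.
target = JSON.parse %({"richGridContinuation": {"contents": [{}, {}]}})

content = target["continuationItems"]?
content ||= target.dig?("gridContinuation", "items")
content ||= target.dig?("richGridContinuation", "contents")

items = content.nil? ? [] of JSON::Any : content.as_a
puts items.size # => 2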