Add fix for playlists with less than 100 videos

Omar Roth 2018-09-22 14:13:10 -05:00
Commit 35ac887133
2 changed files with 79 additions and 56 deletions

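The change below splits playlist loading into two calls: fetch_playlist(plid) returns the playlist metadata (including video_count), and the new fetch_playlist_videos(plid, page, video_count) returns one page of videos, choosing its fetch strategy from that count. A minimal Crystal sketch of the call order the changed handlers use (the surrounding handler and error-handling code is elided):

    # Call order used by the handlers in this diff; plid and page stand in
    # for the request parameters parsed by the route.
    playlist = fetch_playlist(plid)                                   # metadata, including video_count
    videos = fetch_playlist_videos(plid, page, playlist.video_count)  # one page of PlaylistVideo records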
View File

@@ -390,13 +390,14 @@ get "/playlist" do |env|
   page = env.params.query["page"]?.try &.to_i?
   page ||= 1
+  playlist = fetch_playlist(plid)
   begin
-    videos = extract_playlist(plid, page)
+    videos = fetch_playlist_videos(plid, page, playlist.video_count)
   rescue ex
     error_message = ex.message
     next templated "error"
   end
-  playlist = fetch_playlist(plid)
   templated "playlist"
 end
@@ -470,11 +471,11 @@ get "/search" do |env|
   elsif subscriptions
     videos = PG_DB.query_all("SELECT id,title,published,updated,ucid,author FROM (
     SELECT *,
-      to_tsvector(channel_videos.title) ||
-      to_tsvector(channel_videos.author)
-    as document
+      to_tsvector(channel_videos.title) ||
+      to_tsvector(channel_videos.author)
+    as document
     FROM channel_videos WHERE ucid IN (#{arg_array(ucids, 3)})
-    ) v_search WHERE v_search.document @@ plainto_tsquery($1) LIMIT 20 OFFSET $2;", [search_query, (page - 1) * 20] + ucids, as: ChannelVideo)
+    ) v_search WHERE v_search.document @@ plainto_tsquery($1) LIMIT 20 OFFSET $2;", [search_query, (page - 1) * 20] + ucids, as: ChannelVideo)
     count = videos.size
   else
     begin
@@ -2822,15 +2823,15 @@ get "/api/v1/playlists/:plid" do |env|
   page = env.params.query["page"]?.try &.to_i?
   page ||= 1
+  playlist = fetch_playlist(plid)
   begin
-    videos = extract_playlist(plid, page)
+    videos = fetch_playlist_videos(plid, page, playlist.video_count)
   rescue ex
     error_message = {"error" => "Playlist is empty"}.to_json
     halt env, status_code: 404, response: error_message
   end
-  playlist = fetch_playlist(plid)
   response = JSON.build do |json|
     json.object do
       json.field "title", playlist.title

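The API handler above builds its response with Crystal's JSON.build. A small self-contained sketch of that pattern, with made-up field values (the real handler continues past the excerpt and emits the playlist's actual fields):

    require "json"

    # Minimal JSON.build example mirroring the handler above; the values are
    # placeholders, not taken from the diff.
    response = JSON.build do |json|
      json.object do
        json.field "title", "Example playlist"
        json.field "videoCount", 42
      end
    end

    puts response # => {"title":"Example playlist","videoCount":42}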
View File

@@ -25,58 +25,75 @@ class PlaylistVideo
   })
 end
-def extract_playlist(plid, page)
-  index = (page - 1) * 100
-  url = produce_playlist_url(plid, index)
+def fetch_playlist_videos(plid, page, video_count)
   client = make_client(YT_URL)
-  response = client.get(url)
-  response = JSON.parse(response.body)
-  if !response["content_html"]? || response["content_html"].as_s.empty?
-    raise "Playlist does not exist"
+  if video_count > 100
+    index = (page - 1) * 100
+    url = produce_playlist_url(plid, index)
+    response = client.get(url)
+    response = JSON.parse(response.body)
+    if !response["content_html"]? || response["content_html"].as_s.empty?
+      raise "Playlist is empty"
+    end
+    document = XML.parse_html(response["content_html"].as_s)
+    nodeset = document.xpath_nodes(%q(.//tr[contains(@class, "pl-video")]))
+    videos = extract_playlist(plid, nodeset, index)
+  else
+    if page > 1
+      videos = [] of PlaylistVideo
+    else
+      response = client.get("/playlist?list=#{plid}&disable_polymer=1")
+      document = XML.parse_html(response.body)
+      nodeset = document.xpath_nodes(%q(.//tr[contains(@class, "pl-video")]))
+      videos = extract_playlist(plid, nodeset, 0)
+    end
   end
+  return videos
+end
+def extract_playlist(plid, nodeset, index)
   videos = [] of PlaylistVideo
-  document = XML.parse_html(response["content_html"].as_s)
-  anchor = document.xpath_node(%q(//div[@class="pl-video-owner"]/a))
-  if anchor
-    document.xpath_nodes(%q(.//tr[contains(@class, "pl-video")])).each_with_index do |video, offset|
-      anchor = video.xpath_node(%q(.//td[@class="pl-video-title"]))
-      if !anchor
-        next
-      end
-      title = anchor.xpath_node(%q(.//a)).not_nil!.content.strip(" \n")
-      id = anchor.xpath_node(%q(.//a)).not_nil!["href"].lchop("/watch?v=")[0, 11]
-      anchor = anchor.xpath_node(%q(.//div[@class="pl-video-owner"]/a))
-      if anchor
-        author = anchor.content
-        ucid = anchor["href"].split("/")[2]
-      else
-        author = ""
-        ucid = ""
-      end
-      anchor = video.xpath_node(%q(.//td[@class="pl-video-time"]/div/div[1]))
-      if anchor && !anchor.content.empty?
-        length_seconds = decode_length_seconds(anchor.content)
-      else
-        length_seconds = 0
-      end
-      videos << PlaylistVideo.new(
-        title,
-        id,
-        author,
-        ucid,
-        length_seconds,
-        Time.now,
-        [plid],
-        index + offset,
-      )
-    end
+  nodeset.each_with_index do |video, offset|
+    anchor = video.xpath_node(%q(.//td[@class="pl-video-title"]))
+    if !anchor
+      next
+    end
+    title = anchor.xpath_node(%q(.//a)).not_nil!.content.strip(" \n")
+    id = anchor.xpath_node(%q(.//a)).not_nil!["href"].lchop("/watch?v=")[0, 11]
+    anchor = anchor.xpath_node(%q(.//div[@class="pl-video-owner"]/a))
+    if anchor
+      author = anchor.content
+      ucid = anchor["href"].split("/")[2]
+    else
+      author = ""
+      ucid = ""
+    end
+    anchor = video.xpath_node(%q(.//td[@class="pl-video-time"]/div/div[1]))
+    if anchor && !anchor.content.empty?
+      length_seconds = decode_length_seconds(anchor.content)
+    else
+      length_seconds = 0
+    end
+    videos << PlaylistVideo.new(
+      title,
+      id,
+      author,
+      ucid,
+      length_seconds,
+      Time.now,
+      [plid],
+      index + offset,
+    )
   end
   return videos
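The branch on video_count above is the core of the fix: playlists with more than 100 videos keep using the browse_ajax continuation at offset (page - 1) * 100, playlists with 100 or fewer are scraped from the plain /playlist page, and any page past the first for such a playlist simply returns an empty array. A standalone Crystal sketch of that dispatch (no HTTP; the helper name is made up for illustration):

    # Which path fetch_playlist_videos takes for a given playlist size and page,
    # per the function above. Hypothetical helper, not part of the codebase.
    def playlist_fetch_path(video_count : Int32, page : Int32) : String
      if video_count > 100
        index = (page - 1) * 100                  # browse_ajax pages hold 100 videos
        "browse_ajax continuation starting at index #{index}"
      elsif page > 1
        "empty result (playlists of <= 100 videos have a single page)"
      else
        "direct scrape of /playlist?list=...&disable_polymer=1"
      end
    end

    playlist_fetch_path(250, 2) # => "browse_ajax continuation starting at index 100"
    playlist_fetch_path(45, 1)  # => "direct scrape of /playlist?list=...&disable_polymer=1"
    playlist_fetch_path(45, 2)  # => "empty result (playlists of <= 100 videos have a single page)"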
@@ -112,13 +129,18 @@ def produce_playlist_url(id, index)
   continuation = Base64.urlsafe_encode(continuation)
   continuation = URI.escape(continuation)
-  url = "/browse_ajax?action_continuation=1&continuation=#{continuation}"
+  url = "/browse_ajax?continuation=#{continuation}"
   return url
 end
 def fetch_playlist(plid)
   client = make_client(YT_URL)
+  if plid.starts_with? "UC"
+    plid = "UU#{plid.lchop("UC")}"
+  end
   response = client.get("/playlist?list=#{plid}&disable_polymer=1")
   body = response.body.gsub(<<-END_BUTTON
     <button class="yt-uix-button yt-uix-button-size-default yt-uix-button-link yt-uix-expander-head playlist-description-expander yt-uix-inlineedit-ignore-edit" type="button" onclick=";return false;"><span class="yt-uix-button-content"> less <img alt="" src="/yts/img/pixel-vfl3z5WfW.gif">