

Scraping movie-site download links with Python

Views: 67 · Date: 2022-06-18 09:31:11
Contents: project repository · sample run · imported modules · main crawler code · complete code

Project repository:

https://github.com/GriffinLewis2001/Python_movie_links_scraper

Sample run

[Screenshots of the script searching for a title and printing its download links]

Imported modules

import requests, re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent
import os, pickle, threading, time
import concurrent.futures
from goto import with_goto
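Note that fake_useragent is imported but never used in the code below. Purely as an aside (this is not part of the original script), it could supply a randomized User-Agent header instead of the hard-coded one:

from fake_useragent import UserAgent  # already imported above

# Hypothetical use of the otherwise-unused fake_useragent package:
# UserAgent().random returns a random real-world User-Agent string that
# could replace the hard-coded value in send_headers below.
ua = UserAgent()
random_headers = {'User-Agent': ua.random}
# response = requests.get(url, headers=random_headers, cookies=cookie_jar)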

Main crawler code

def get_content_url_name(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    # pass the headers through headers= (the original passed them positionally,
    # which requests.get() would treat as query parameters)
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # Reconstructed pattern: the original regex was overwritten with injected
    # link spam when this article was republished; the intent appears to be
    # capturing the href of each result link.
    reg = re.compile(r'<a href="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list

def get_content(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text

def search_durl(url):
    content = get_content(url)
    # '\x64\x65\x63\x72\x69\x70\x74\x50\x61\x72\x61\x6d' is hex for 'decriptParam';
    # the backslashes were stripped when the article was republished.
    reg = re.compile(r'{"\x64\x65\x63\x72\x69\x70\x74\x50\x61\x72\x61\x6d":"(.*?)"}')
    index = reg.findall(content)[0]
    download_url = url[:-5] + r'/downloadList?decriptParam=' + index
    content = get_content(download_url)
    # Reconstructed pattern (the original was mangled by injected link spam):
    # capture the link target of each download entry.
    reg1 = re.compile(r'title=".*?" href="(.*?)"')
    download_list = reg1.findall(content)
    return download_list

def get_page(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # Reconstructed pattern: capture each search result's link and display name.
    reg = re.compile(r'<a target="_blank" href="(.*?)"[^>]*>(.*?)</a>')
    url_name_list = reg.findall(content)
    return url_name_list

@with_goto
def main():
    print('=========================================================')
    name = input('Enter a show name (type quit to exit): ')
    if name == 'quit':
        exit()
    url = 'http://www.yikedy.co/search?query=' + name
    dlist = get_page(url)
    print('\n')
    if dlist:
        num = 0
        count = 0
        for i in dlist:
            if name in i[1]:
                print(f'{num} {i[1]}')
                num += 1
            elif num == 0 and count == len(dlist) - 1:
                goto .end
            count += 1
        dest = int(input('\n\nEnter the number of the show (enter 100 to skip this search): '))
        if dest == 100:
            goto .end
        x = 0
        print('\nDownload links:\n')
        for i in dlist:
            if name in i[1]:
                if x == dest:
                    for durl in search_durl(i[0]):
                        print(f'{durl}\n')
                    print('\n')
                    break
                x += 1
    else:
        label .end
        print('Nothing found, or nothing you want to watch\n')
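All three request helpers above rebuild the same headers and cookie jar before every call. As a side note (this refactor is not part of the original project), the shared setup could live in a single requests.Session:

# Hypothetical refactor: one configured session reused for every request,
# instead of re-creating send_headers and the cookie jar in each function.
def make_session():
    session = requests.Session()
    session.headers.update({
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8',
    })
    session.cookies.set('mttp', '9740fe449238', domain='www.yikedy.co')
    return session

def fetch_text(session, url):
    # same behaviour as get_content(), minus the repeated setup
    response = session.get(url)
    response.encoding = 'utf-8'
    return response.text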

Complete code

import requests, re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent
import os, pickle, threading, time
import concurrent.futures
from goto import with_goto

def get_content_url_name(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # reconstructed pattern (the original was mangled by injected link spam)
    reg = re.compile(r'<a href="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list

def get_content(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text

def search_durl(url):
    content = get_content(url)
    # hex escapes spell 'decriptParam'; backslashes restored
    reg = re.compile(r'{"\x64\x65\x63\x72\x69\x70\x74\x50\x61\x72\x61\x6d":"(.*?)"}')
    index = reg.findall(content)[0]
    download_url = url[:-5] + r'/downloadList?decriptParam=' + index
    content = get_content(download_url)
    # reconstructed pattern (the original was mangled by injected link spam)
    reg1 = re.compile(r'title=".*?" href="(.*?)"')
    download_list = reg1.findall(content)
    return download_list

def get_page(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # reconstructed pattern (the original was mangled by injected link spam)
    reg = re.compile(r'<a target="_blank" href="(.*?)"[^>]*>(.*?)</a>')
    url_name_list = reg.findall(content)
    return url_name_list

@with_goto
def main():
    print('=========================================================')
    name = input('Enter a show name (type quit to exit): ')
    if name == 'quit':
        exit()
    url = 'http://www.xxx.com/search?query=' + name
    dlist = get_page(url)
    print('\n')
    if dlist:
        num = 0
        count = 0
        for i in dlist:
            if name in i[1]:
                print(f'{num} {i[1]}')
                num += 1
            elif num == 0 and count == len(dlist) - 1:
                goto .end
            count += 1
        dest = int(input('\n\nEnter the number of the show (enter 100 to skip this search): '))
        if dest == 100:
            goto .end
        x = 0
        print('\nDownload links:\n')
        for i in dlist:
            if name in i[1]:
                if x == dest:
                    for durl in search_durl(i[0]):
                        print(f'{durl}\n')
                    print('\n')
                    break
                x += 1
    else:
        label .end
        print('Nothing found, or nothing you want to watch\n')

print('This software is by CLY.\n\n')
while True:
    main()
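concurrent.futures is likewise imported but never used in the script. If you wanted to look up the download lists for several results at once, a minimal sketch (my addition, not the author's code; candidate_urls is a hypothetical list of detail-page URLs taken from get_page()) could look like this:

# Hypothetical parallel lookup built on the already-imported concurrent.futures.
def fetch_all_download_lists(candidate_urls):
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
        # pool.map preserves input order; each worker runs search_durl on one URL
        return list(pool.map(search_durl, candidate_urls))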

That is the full walkthrough of scraping movie-site download links with Python. For more material on scraping download links with Python, see the other related articles on 好吧啦網(wǎng)!

Tags: Python, Programming