莫名的悲伤 2024-03-28 00:05:19 +08:00
parent 16a7b03162
commit f175531f75
2 changed files with 296 additions and 229 deletions

View File

@@ -3,16 +3,20 @@
 import sys
 sys.path.append('..')
 from base.spider import Spider
+import json
 import base64
+import hashlib
+import requests
 from Crypto.Cipher import AES
+import urllib.parse
 class Spider(Spider):  # metaclass: the default metaclass is type
     def getName(self):
         return "厂长资源"
     def init(self, extend=""):
         print("============{0}============".format(extend))
         pass
     def homeContent(self, filter):
         result = {}
         cateManual = {
@@ -33,9 +37,17 @@ class Spider(Spider):  # metaclass: the default metaclass is type
             })
         result['class'] = classes
         return result
     def homeVideoContent(self):
-        rsp = self.fetch("https://www.czzy66.com/")
-        root = self.html(rsp.text)
+        url = "https://czspp.com"
+        header = {
+            "Connection": "keep-alive",
+            "Referer": url,
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
+        }
+        # getCookie clears the site's bot check and returns the loaded page.
+        rsp = self.getCookie(url, header)
+        root = self.html(self.cleanText(rsp.text))
         aList = root.xpath("//div[@class='mi_btcon']//ul/li")
         videos = []
         for a in aList:
@@ -54,17 +66,51 @@ class Spider(Spider):  # metaclass: the default metaclass is type
             'list': videos
         }
         return result
+    def getCookie(self, url, header=None):
+        if header is None:
+            header = {
+                "Referer": 'https://www.czzy77.com/',
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
+            }
+        session = requests.session()
+        rsp = session.get(url)
+        if '人机验证' in rsp.text:
+            # Bot-check page: fetch the challenge script, md5 the concatenated
+            # character codes of the embedded value, then submit key and value.
+            append = self.regStr(rsp.text, 'src=\"(/.*?)\"')
+            nurl = 'https://www.czzy77.com' + append
+            nrsp = session.get(nurl, headers=header)
+            key = self.regStr(nrsp.text, 'var key=\"(.*?)\"')
+            avalue = self.regStr(nrsp.text, 'value=\"(.*?)\"')
+            c = ''
+            for a in avalue:
+                c = c + str(ord(a))
+            value = hashlib.md5(c.encode()).hexdigest()
+            session.get('https://www.czzy77.com/a20be899_96a6_40b2_88ba_32f1f75f1552_yanzheng_ip.php?type=96c4e20a0e951f471d32dae103e83881&key={0}&value={1}'.format(key, value), headers=header)
+            return session.get(url, headers=header)
+        elif '检测中' in rsp.text:
+            # Redirect-style check: follow the embedded href once, then reload the page.
+            append = self.regStr(rsp.text, 'href =\"(/.*?)\"')
+            session.get('https://www.czzy77.com{0}'.format(append), headers=header)
+            return session.get(url, headers=header)
+        else:
+            return rsp
     def categoryContent(self, tid, pg, filter, extend):
         result = {}
-        url = 'https://www.czzy66.com/{0}/page/{1}'.format(tid,pg)
-        rsp = self.fetch(url)
-        root = self.html(rsp.text)
-        aList = root.xpath("//div[contains(@class,'mi_cont')]//ul/li")
+        url = 'https://www.czzy77.com/{0}/page/{1}'.format(tid,pg)
+        rsp = self.getCookie(url)
+        root = self.html(self.cleanText(rsp.text))
+        aList = root.xpath("//div[contains(@class,'bt_img mi_ne_kd mrb')]/ul/li")
         videos = []
         for a in aList:
             name = a.xpath('./a/img/@alt')[0]
             pic = a.xpath('./a/img/@data-original')[0]
-            mark = a.xpath("./div[@class='hdinfo']/span/text()")[0]
+            mark = a.xpath(".//div[@class='jidi']/span/text()")
+            if not mark:
+                mark = a.xpath("./div[@class='hdinfo']/span/text()")
+            mark = mark[0]
             sid = a.xpath("./a/@href")[0]
             sid = self.regStr(sid, "/movie/(\\S+).html")
             videos.append({
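
The new getCookie above is what lets every other request through: when the site serves its 人机验证 page, the spider answers by md5-hashing the concatenated decimal character codes of the page-supplied value. A minimal standalone sketch of that answer computation; the helper name and the input string are made up for illustration, not real challenge data:

import hashlib

def challenge_answer(avalue):
    # Concatenate the decimal character code of every character, then md5
    # the digit string; this mirrors the loop in getCookie above.
    digits = ''.join(str(ord(ch)) for ch in avalue)
    return hashlib.md5(digits.encode()).hexdigest()

# Placeholder value for illustration only.
print(challenge_answer("abc123"))  # hashes the digit string "979899495051"
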
@@ -73,24 +119,22 @@ class Spider(Spider):  # metaclass: the default metaclass is type
                 "vod_pic": pic,
                 "vod_remarks": mark
             })
         result['list'] = videos
         result['page'] = pg
         result['pagecount'] = 9999
         result['limit'] = 90
         result['total'] = 999999
         return result
     def detailContent(self, array):
         tid = array[0]
-        url = 'https://www.czzy66.com/movie/{0}.html'.format(tid)
-        rsp = self.fetch(url)
-        root = self.html(rsp.text)
+        url = 'https://www.czzy77.com/movie/{0}.html'.format(tid)
+        rsp = self.getCookie(url)
+        root = self.html(self.cleanText(rsp.text))
         node = root.xpath("//div[@class='dyxingq']")[0]
         pic = node.xpath(".//div[@class='dyimg fl']/img/@src")[0]
         title = node.xpath('.//h1/text()')[0]
         detail = root.xpath(".//div[@class='yp_context']//p/text()")[0]
         vod = {
             "vod_id": tid,
             "vod_name": title,
@@ -103,29 +147,32 @@ class Spider(Spider):  # metaclass: the default metaclass is type
             "vod_director": "",
             "vod_content": detail
         }
         infoArray = node.xpath(".//ul[@class='moviedteail_list']/li")
         for info in infoArray:
             content = info.xpath('string(.)')
-            if content.startswith('类型'):
-                vod['type_name'] = content
-            if content.startswith('年份'):
-                vod['vod_year'] = content
             if content.startswith('地区'):
-                vod['vod_area'] = content
-            if content.startswith('豆瓣'):
-                vod['vod_remarks'] = content
+                tpyeare = ''
+                for inf in info:
+                    tn = inf.text
+                    tpyeare = tpyeare +'/'+'{0}'.format(tn)
+                vod['vod_area'] = tpyeare.strip('/')
+            if content.startswith('年份'):
+                vod['vod_year'] = content.replace("年份:","")
             if content.startswith('主演'):
-                vod['vod_actor'] = content
+                tpyeact = ''
+                for inf in info:
+                    tn = inf.text
+                    tpyeact = tpyeact +'/'+'{0}'.format(tn)
+                vod['vod_actor'] = tpyeact.strip('/')
             if content.startswith('导演'):
-                vod['vod_director'] = content
-            # if content.startswith('剧情'):
-            #     vod['vod_content'] = content
+                tpyedire = ''
+                for inf in info:
+                    tn = inf.text
+                    tpyedire = tpyedire +'/'+'{0}'.format(tn)
+                vod['vod_director'] = tpyedire.strip('/')
         vod_play_from = '$$$'
         playFrom = ['厂长']
         vod_play_from = vod_play_from.join(playFrom)
         vod_play_url = '$$$'
         playList = []
         vodList = root.xpath("//div[@class='paly_list_btn']")
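
The reworked detailContent no longer stores the raw li text for 地区 / 主演 / 导演; it iterates each item's child elements and joins their text with '/'. A small self-contained illustration of that join with lxml; the markup snippet is invented for the example:

from lxml import etree

# Invented markup shaped like one entry of the moviedteail_list.
li = etree.fromstring("<li>地区：<span>美国</span><span>英国</span></li>")

joined = ''
for inf in li:  # iterating an element yields its child elements, here the spans
    joined = joined + '/' + '{0}'.format(inf.text)
print(joined.strip('/'))  # 美国/英国
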
@@ -134,7 +181,7 @@ class Spider(Spider):  # metaclass: the default metaclass is type
             aList = vl.xpath('./a')
             for tA in aList:
                 href = tA.xpath('./@href')[0]
-                name = tA.xpath('./text()')[0]
+                name = tA.xpath('./text()')[0].replace('\xa0','')
                 tId = self.regStr(href, '/v_play/(\\S+).html')
                 vodItems.append(name + "$" + tId)
             joinStr = '#'
@@ -144,7 +191,6 @@ class Spider(Spider):  # metaclass: the default metaclass is type
         vod['vod_play_from'] = vod_play_from
         vod['vod_play_url'] = vod_play_url
         result = {
             'list': [
                 vod
@@ -153,12 +199,9 @@ class Spider(Spider):  # metaclass: the default metaclass is type
         return result
     def searchContent(self, key, quick):
-        url = 'https://www.czzy66.com/xssearch?q={0}'.format(key)
-        # getHeader()
-        rsp = self.fetch(url)
-        root = self.html(rsp.text)
-        result = {}
+        url = 'https://www.czzy77.com/xssearch?q={0}'.format(urllib.parse.quote(key))
+        rsp = self.getCookie(url)
+        root = self.html(self.cleanText(rsp.text))
         vodList = root.xpath("//div[contains(@class,'mi_ne_kd')]/ul/li/a")
         videos = []
         for vod in vodList:
@@ -166,7 +209,11 @@ class Spider(Spider):  # metaclass: the default metaclass is type
             pic = vod.xpath('./img/@data-original')[0]
             href = vod.xpath('./@href')[0]
             tid = self.regStr(href, 'movie/(\\S+).html')
-            remark = ""
+            res = vod.xpath('./div[@class="jidi"]/span/text()')
+            if len(res) == 0:
+                remark = '全1集'
+            else:
+                remark = res[0]
             videos.append({
                 "vod_id": tid,
                 "vod_name": name,
@@ -177,15 +224,14 @@ class Spider(Spider):  # metaclass: the default metaclass is type
             'list': videos
         }
         return result
     config = {
         "player": {},
         "filter": {}
     }
     header = {
+        "Referer": "https://www.czzy77.com/",
         "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36"
     }
     def parseCBC(self, enc, key, iv):
         keyBytes = key.encode("utf-8")
         ivBytes = iv.encode("utf-8")
@@ -195,42 +241,50 @@ class Spider(Spider):  # metaclass: the default metaclass is type
         return msg[0:-paddingLen]
     def playerContent(self, flag, id, vipFlags):
-        url = 'https://www.czzy66.com/v_play/{0}.html'.format(id)
+        result = {}
+        url = 'https://www.czzy77.com/v_play/{0}.html'.format(id)
+        rsp = self.getCookie(url)
         pat = '\\"([^\\"]+)\\";var [\\d\\w]+=function dncry.*md5.enc.Utf8.parse\\(\\"([\\d\\w]+)\\".*md5.enc.Utf8.parse\\(([\\d]+)\\)'
-        rsp = self.fetch(url)
         html = rsp.text
         content = self.regStr(html, pat)
-        key = self.regStr(html, pat, 2)
-        iv = self.regStr(html, pat, 3)
-        decontent = self.parseCBC(base64.b64decode(content), key, iv).decode()
-        urlPat = 'video: \\{url: \\\"([^\\\"]+)\\\"'
-        vttPat = 'subtitle: \\{url:\\\"([^\\\"]+\\.vtt)\\\"'
-        str3 = self.regStr(decontent, urlPat)
-        str4 = self.regStr(decontent, vttPat)
-        self.loadVtt(str3)
-        result = {
-            'parse':'0',
-            'playUrl':'',
-            'url':str3,
-            'header':''
-        }
-        if len(str4) > 0:
-            result['subf'] = '/vtt/utf-8'
-            # result['subt'] = Proxy.localProxyUrl() + "?do=czspp&url=" + URLEncoder.encode(str4)
-            result['subt'] = ''
+        if content == '':
+            # No encrypted player block: hand the page URL to the app and let it parse.
+            str3 = url
+            pars = 1
+            header = {
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"
+            }
+        else:
+            key = self.regStr(html, pat, 2)
+            iv = self.regStr(html, pat, 3)
+            decontent = self.parseCBC(base64.b64decode(content), key, iv).decode()
+            urlPat = 'video: \\{url: \\\"([^\\\"]+)\\\"'
+            vttPat = 'subtitle: \\{url:\\\"([^\\\"]+\\.vtt)\\\"'
+            str3 = self.regStr(decontent, urlPat)
+            str4 = self.regStr(decontent, vttPat)
+            self.loadVtt(str3)
+            pars = 0
+            header = ''
+            if len(str4) > 0:
+                result['subf'] = '/vtt/utf-8'
+                result['subt'] = ''
+        # update() keeps any subtitle keys set above instead of overwriting them.
+        result.update({
+            'parse': pars,
+            'playUrl': '',
+            'url': str3,
+            'header': header
+        })
         return result
     def loadVtt(self, url):
-        print(url)
+        pass
     def isVideoFormat(self, url):
         pass
     def manualVideoCheck(self):
         pass
     def localProxy(self, param):
         action = {}
         return [200, "video/MP2T", action, ""]
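
playerContent now distinguishes two cases: if no encrypted block is found it hands the page URL to the player with parse set to 1, otherwise it base64-decodes the blob, decrypts it with AES-CBC using the key and iv scraped from the same script, and regexes the video URL out of the plaintext. A minimal sketch of that decrypt-and-extract step with PyCryptodome, which the file already imports; the helper names are illustrative and the blob, key and iv arguments are placeholders, not real page data:

import base64
import re
from Crypto.Cipher import AES

def decrypt_player_blob(b64_blob, key, iv):
    # AES-CBC with PKCS#7 padding, mirroring parseCBC above.
    cipher = AES.new(key.encode("utf-8"), AES.MODE_CBC, iv.encode("utf-8"))
    msg = cipher.decrypt(base64.b64decode(b64_blob))
    return msg[:-msg[-1]].decode("utf-8")

def extract_video_url(decoded):
    # Same shape the spider matches: video: {url: "..."}
    m = re.search(r'video: \{url: \"([^\"]+)\"', decoded)
    return m.group(1) if m else ''

These helpers only work when the scraped key and iv have valid AES lengths (16, 24, or 32 bytes for the key, 16 bytes for the iv).
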

View File

@@ -2,9 +2,9 @@
   "spider": "https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1711305426162/fan.txt;md5;602fb3226d777850053484cdeb2f3296",
   "lives": [
     {
-      "name": "—车—",
+      "name": "Auto",
       "type": 0,
-      "url": "https://weixine.net/tv/ysclive.txt",
+      "url": "https://ghproxy.net/https://weixine.net/tv/ysclive.txt",
       "playerType": 1,
       "ua": "okhttp/3.15",
       "epg": "http://epg.112114.xyz/?ch={name}&date={date}",
@@ -22,7 +22,7 @@
     {
       "name": "—欧—",
       "type": 0,
-      "url": "http://tv.nxog.top/m/tv/live.jpg",
+      "url": "https://raw.githubusercontent.com/Guovin/TV/master/result.txt",
       "playerType": 1,
       "ua": "okhttp/3.15",
       "epg": "http://epg.112114.xyz/?ch={name}&date={date}",
@@ -159,6 +159,16 @@
       "jar": "https://jihulab.com/okcaptain/kko/-/raw/main/okpng.txt",
       "timeout": 25
     },
+    {
+      "key": "py_czspp",
+      "name": "厂长┃PY",
+      "type": 3,
+      "api": "py_czspp",
+      "searchable": 1,
+      "quickSearch": 1,
+      "filterable": 1,
+      "ext": "./py_czspp.py"
+    },
     {
       "key": "白票弹幕版",
       "name": "白票┃弹幕",
@@ -177,7 +187,10 @@
     {
       "key": "py_cctv",
       "name": "央视┃[py]",
-      "style":{"type":"rect","ratio":1.485},
+      "style": {
+        "type": "rect",
+        "ratio": 1.485
+      },
       "type": 3,
       "api": "py_cctv",
       "searchable": 1,