python - Scraping a proxy IP page with cookies, but getting an error
Problem description
from urllib.request import *
from http.cookiejar import *

url = 'http://www.kuaidaili.com/proxylist/8/'
cookies = MozillaCookieJar()
handler = HTTPCookieProcessor(cookies)  # attach the cookie jar to requests
opener = build_opener(handler)
install_opener(opener)
html = urlopen(url).read()
print(html)
This page requires cookies to access. When I request it with the code above, it fails with an HTTPError: 521 exception.
Answers
Answer 1: That is simply this site's anti-bot strategy: the first request always comes back as a 521, but the response body still has content:
<html><body><script language='javascript'> window.onload=setTimeout('hv(233)', 200); function hv(OL) {var qo, mo='', no='', oo = [0xd9,0xa6,0x34,0xc9,0x42,0x3c,0xb1,0x27,0xf0,0x55,0x1b,0xb4,0x8a,0x64,0x48,0x5e,0x98,0x0e,0x03,0x58,0x2f,0x51,0x8a,0xf3,0x89,0x73,0xec,0xa2,0xda,0x63,0x19,0xe2,0x7c,0xf1,0xe6,0xaa,0xdf,0x55,0x7a,0x04,0x98,0x29,0x32,0x67,0xeb,0x70,0xd4,0x85,0x0f,0xda,0x94,0x0a,0x4e,0x92,0x0c,0x51,0xd4,0x5a,0x8f,0x15,0x9e,0xd3,0x28,0x8a,0x80,0x06,0x3b,0xdf,0x84,0x76,0x0c,0x70,0xe5,0x5a,0xee,0xe4,0x9a,0x5d,0xa1,0x16,0xcf,0xc1,0xe6,0x70,0xc0,0x41,0x76,0xea,0x5f,0xd8,0x59,0x43,0x87,0x1c,0xa1,0x3b,0x2d,0xe1,0xe3,0x48,0x79,0x2e,0xe2,0x67,0xab,0x69,0x1e,0x53,0xd7,0xec,0x8e,0x08,0x4e,0x77,0x20,0x56,0xde,0x58,0xf0,0xb4,0xa5,0x40,0xb8,0x7e,0x64,0x06,0x32,0xd6,0x5b,0x4d,0x05,0xad,0x36,0x09,0xfe,0xb3,0x08,0xa9,0x4e,0x83,0xaf,0xb4,0x15,0xa9,0xae,0x63,0xe7,0xb8,0x5a,0xb1,0xa9,0x14,0x25,0xca,0x37,0xa0,0x76,0x70,0x26,0x60,0x26,0x4a,0x3f,0x01,0x1b,0x93,0x49,0x83,0x6a,0xd3,0x89,0xc3,0xa9,0xe3,0xa5,0x9a,0x34,0x0a,0x04,0x15,0xba,0x63,0xa9,0x63,0xcb,0xf1,0xe6,0xbc,0x0e,0x6b,0x80,0x22,0x7a,0xb4,0x7a,0xe3,0x41,0x1b,0x73,0x35,0x9e,0x78,0x0e,0xfc,0x71,0x6b,0xe4,0xaa,0x13,0xd8,0xbd,0xa7,0x7d,0x17,0xd0,0x35,0x6f,0x6c,0x42,0x0c,0x00,0x66,0x40,0xd5,0x8d,0x06,0xff,0x75,0x3f,0xa7,0x69,0x1b,0x91,0x1c,0xc7,0x3b];qo = 'qo=234; do{oo[qo]=(-oo[qo])&0xff; oo[qo]=(((oo[qo]>>2)|((oo[qo]<<6)&0xff))-169)&0xff;} while(--qo>=2);'; eval(qo);qo = 233; do { oo[qo] = (oo[qo] - oo[qo - 1]) & 0xff; } while (-- qo >= 3 );qo = 1; for (;;) { if (qo > 233) break; oo[qo] = ((((((oo[qo] + 72) & 0xff) + 72) & 0xff) << 6) & 0xff) | (((((oo[qo] + 72) & 0xff) + 72) & 0xff) >> 2); qo++;}po = ''; for (qo = 1; qo < oo.length - 1; qo++) if (qo % 7) po += String.fromCharCode(oo[qo] ^ OL);eval('qo=eval;qo(po);');} </script> </body></html>
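You can verify this yourself: urlopen raises HTTPError on the 521, but the exception object is itself file-like, so the challenge page can still be read out of it. A minimal sketch using only the standard library (the User-Agent value here is just an illustrative choice, not something the site is known to require):

from urllib.error import HTTPError
from urllib.request import Request, urlopen

url = 'http://www.kuaidaili.com/proxylist/8/'
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
try:
    html = urlopen(req).read()
except HTTPError as e:
    print(e.code)    # 521 on the first visit
    html = e.read()  # the body is the JavaScript challenge page shown above
print(html.decode('utf-8', errors='replace'))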
The site hides the important key inside that JavaScript and decodes it through eval before redirecting, which serves to obfuscate the logic. Using Selenium might be a way around this.
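For reference, a minimal sketch of that Selenium route, assuming selenium and a matching ChromeDriver are installed; the two-second wait is only a guess at how long the setTimeout challenge needs to run:

import time
from selenium import webdriver

driver = webdriver.Chrome()
try:
    driver.get('http://www.kuaidaili.com/proxylist/8/')
    time.sleep(2)  # let the setTimeout(..., 200) challenge execute and set the cookie
    html = driver.page_source                # the real page, not the challenge stub
    anti_bot_cookies = driver.get_cookies()  # cookies set by the challenge JS
finally:
    driver.quit()

The browser executes the obfuscated eval chain itself and receives the anti-bot cookie, so the retry happens transparently; the resulting cookies could in principle be reused by an ordinary HTTP client for later requests.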
An aside: proxy sites are themselves proxy providers for crawlers, so their anti-scraping measures are quite good. I think a crawler's effort should go into getting the content that actually matters; scraping free proxies to save money costs a great deal of time for very low efficiency. At my company I simply use kuaidaili's paid proxies and spend almost no thought on acquiring them; the only question worth thinking about is how to make the best use of the proxies under high concurrency.