用python的selenium实现京东夺宝岛最后一秒自动下单

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from selenium import webdriver
import requests
import time
import datetime
import json
def get_endTime(paimaiID, advance_ms=900):
    """Return the auction end time (epoch milliseconds) minus ``advance_ms``.

    paimaiID   -- JD auction item id (string or int)
    advance_ms -- how many milliseconds before the official end to aim at,
                  so the bid request lands in time (default 900, as before).

    The endpoint answers with JSONP (``jQueryXXXX({...})``); the payload is
    extracted between the first '(' and the last ')' instead of relying on a
    hard-coded callback-name length as the original slice did.
    """
    endTimeURL = ('http://paimai.jd.com/services/currentList.action'
                  '?paimaiIds={0}&callback=jQuery5542114').format(paimaiID)
    req = requests.get(endTimeURL)
    body = req.text
    # Strip the JSONP wrapper robustly.
    payload = body[body.index('(') + 1:body.rindex(')')]
    data = json.loads(payload)
    return data['endTime'] - advance_ms
def paimai(endTime, paimaiID, name, passWord, want_price):
driver = webdriver.Firefox()
driver.get(
'https://passport.jd.com/new/login.aspx?ReturnUrl=http://paimai.jd.com/{0}'.format(paimaiID))
email = driver.find_element_by_xpath('//input[@id="loginname"]')
email.clear()
email.send_keys(name)
password = driver.find_element_by_xpath('//input[@id="nloginpwd"]')
password.clear()
password.send_keys(passWord)
form = driver.find_element_by_xpath('//a[@id="loginsubmit"]')
form.click()
try:
captcha = driver.find_element_by_xpath('//input[@id="authcode"]')
if captcha:
input_captcha = raw_input()
captcha.send_keys(input_captcha)
form.click()
except:
pass
while 1:
nowTime = int(time.time() * 1000)
if nowTime == endTime:
break
current_price_url = 'http://paimai.jd.com/json/current/englishquery?paimaiId={0}&skuId=0&start=0&end=9'.format(
paimaiID)
current_price = json.loads(
requests.get(current_price_url).content)['currentPrice']
last_price = int(eval(current_price) + 1)
print last_price
if last_price > want_price:
return
price = driver.find_element_by_xpath('//input[@id="bidPrice"]')
price.clear()
price.send_keys(str(int(last_price)))
botton = driver.find_element_by_xpath(
'//div[@id="auctionStatus1"]/div[2]/a[1]')
botton.click()
if __name__ == "__main__":
# 夺宝岛商品id
paimaiID = '11087928'
# 登录名
name = "xxxxxx"
# 密码
password = 'xxxxxx'
# 预期心理价位
want_price = 1000
endTime = get_endTime(paimaiID)
print endTime
paimai(endTime, paimaiID, name, password, want_price)

用python找出知乎某个答案点赞的所有名单

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import requests
from bs4 import BeautifulSoup
import time
import json
import os
import sys

url = 'http://www.zhihu.com'
loginURL = 'http://www.zhihu.com/login/email'

headers = {
"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:41.0) Gecko/20100101 Firefox/41.0',
"Referer": "http://www.zhihu.com/",
'Host': 'www.zhihu.com',
}

data = {
'email': 'xxxxx@gmail.com',
'password': 'xxxxxxx',
'rememberme': "true",
}

s = requests.session()
# 如果成功登陆过,用保存的cookies登录
if os.path.exists('cookiefile'):
with open('cookiefile') as f:
cookie = json.load(f)
s.cookies.update(cookie)
req1 = s.get(url, headers=headers)
with open('zhihu.html', 'w') as f:
f.write(req1.content)
# 第一次需要手动输入验证码登录
else:
req = s.get(url, headers=headers)
print req

soup = BeautifulSoup(req.text, "html.parser")
xsrf = soup.find('input', {'name': '_xsrf', 'type': 'hidden'}).get('value')

data['_xsrf'] = xsrf

timestamp = int(time.time() * 1000)
captchaURL = 'http://www.zhihu.com/captcha.gif?=' + str(timestamp)
print captchaURL

with open('zhihucaptcha.gif', 'wb') as f:
captchaREQ = s.get(captchaURL)
f.write(captchaREQ.content)
loginCaptcha = raw_input('input captcha:\n').strip()
data['captcha'] = loginCaptcha
# print data
loginREQ = s.post(loginURL, headers=headers, data=data)
# print loginREQ.url
# print s.cookies.get_dict()
if not loginREQ.json()['r']:
# print loginREQ.json()
with open('cookiefile', 'wb') as f:
json.dump(s.cookies.get_dict(), f)
else:
print 'login failed, try again!'
sys.exit(1)

# 以http://www.zhihu.com/question/27621722/answer/48820436这个大神的399各赞为例子.
zanBaseURL = 'http://www.zhihu.com/answer/22229844/voters_profile?&offset={0}'
page = 0
count = 0
while 1:
zanURL = zanBaseURL.format(str(page))
page += 10
zanREQ = s.get(zanURL, headers=headers)
zanData = zanREQ.json()['payload']
if not zanData:
break
for item in zanData:
# print item
zanSoup = BeautifulSoup(item, "html.parser")
zanInfo = zanSoup.find('a', {'target': "_blank", 'class': 'zg-link'})
if zanInfo:
print 'nickname:', zanInfo.get('title'), ' ',
print 'person_url:', zanInfo.get('href')
else:
anonymous = zanSoup.find(
'img', {'title': True, 'class': "zm-item-img-avatar"})
print 'nickname:', anonymous.get('title')

count += 1
print count

MAC OS X 10.11.6 El Capitan安装方法与步骤

苹果公司发布了最新的Mac系统El Capitan,我也跟风安装了,
昨天试了一天终于算是安装成功了.

电脑配置:

CPU: E3-1230 v2
主板: 技嘉B75M D3V
显卡: 微星6850
声卡: Realtek ALC887
键盘: Noppoo 84键机械键盘

下载

远景论坛czczyx大神制作的原版镜像,OS X El Capitan 15A284原版安装U盘镜像(带Clover 3270引导)

在win下面用zd423大神制作的不限速的百度云4.62版本下载大概两个小时.

备份到U盘

在win下,管理员权限运行TransMac工具, 将验证过MD5码的10.11镜像恢复到至少8G的U盘上.
然后启动原来的mac10.10, 用Clover Configurator工具打开EFI文件里面的config.plist配置文件,将System Parameters的Inject Kexts改成Yes, 以防安装的时候一直菊花白屏.

或者,进入到CLOVER引导界面后,选择boot install mac os x with inject

安装

重启电脑, 一直按F12, 选择UEFI U盘启动, 选择安装 El Capitan,
大概四五分钟后,

  • 进入安装选择界面,
  • 选择磁盘工具, 格式化要安装的硬盘
  • 安装10.11,
  • 大概20分钟到最后1秒, 这个1秒也要持续10~20分钟
  • 重启后再次U盘选择安装El Capitan, 这次同样需要20分钟左右
  • 重启后u盘选择从刚刚安装的硬盘启动El Capitan,
  • 配置用户名密码,不要选择网络,之后进入桌面

安装驱动

  • 安装事先下载好的clover, 安装的时候选择自定,然后勾选’UEFI开机版本’

  • 将U盘的EFI目录粘贴覆盖到电脑的EFI盘中, 如果没有显示可以用Clover Configurator挂载.

  • 将下载好的网卡驱动拷贝到’/Volumes/EFI/EFI/CLOVER/kexts/10.11’. 我用的也是远景大神提供的下载

  • 重启电脑,安装声卡,我主板是技嘉的B75M-D3V, 声卡型号是ALC887, 在10.10和10.9的时候一直用MultiBeast的声卡驱动.现在El Capitan版本的MultiBeast还没有出, 7.5版本选择好声卡驱动building的时候软件报错退出.

  • 然后开始了漫长的尝试各种声卡驱动历程, 远景论坛大神和外国大神提供的声卡驱动,我用Kext Wizard安装并且修复权限后,能出声音, 但是出声大概5秒后开始噪音,然后卡顿.

  • 安装VoodooHDA 2.8.8重启,还没进入桌面就很大噪音发出来.同样没法用.

  • 最后尝试到Voodoo_HDA_2.8.5_MAV.pkg,还算可以, 就是启动载入桌面前有一声爆音, 其他还可以. 不算完美,也能用吧.至少不卡顿和没有噪音.

  • Noppoo的机械键盘与MAC osx不兼容,需要安装大神制作好的驱动和软件, 参考IOUSBHIDDriverDescriptorOverride,

python模拟登陆京东

首次登录需要手动输入验证码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2015-10-11 19:10:06
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

import requests
from bs4 import BeautifulSoup
import time
import os
import json

url = 'https://passport.jd.com/uc/login'
s = requests.session()
data = {
'chkRememberMe': 'on',
'loginname': 'xxxx',
'loginpwd': 'xxxxx',
'machineCpu': '',
'machineDisk': '',
'machineNet': '',
'nloginpwd': 'xxxxxx'
}
if os.path.exists('jdcookie'):
with open('jdcookie') as f:
cookie = json.load(f)
s.cookies.update(cookie)

else:
req = s.get(url)

soup = BeautifulSoup(req.text, "html.parser")
items = soup.select('form#formlogin > input')
uuid = items[0].get('value').encode('utf-8')
data['uuid'] = uuid

input_name = items[-1].get('name').encode('utf-8')
input_value = items[-1].get('value').encode('utf-8')
data[input_name] = input_value
verify_url = soup.find('img', id='JD_Verification1')[
'src2'] + '&yys=' + str(int(time.time() * 1000))
print verify_url
img = s.get(verify_url)
f = open('image.jpg', 'wb')
f.write(img.content)
f.close()
print 'input code:'
authcode = raw_input()
data['authcode'] = authcode
print data
postreq = s.post(
'https://passport.jd.com/uc/loginService?version=2015', data=data)
postreq.encoding = 'gbk'
print postreq.text
if 'success' in postreq.text:
with open('jdcookie', 'w') as f:
json.dump(s.cookies.get_dict(), f)

python模拟登陆知乎

第一次登陆的时候需要手动输入验证码,
之后用cookie登陆.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2015-11-12 20:08:49
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import requests
from bs4 import BeautifulSoup
import time
import json
import os
url = 'http://www.zhihu.com'
loginURL = 'http://www.zhihu.com/login/email'
headers = {
"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:41.0) Gecko/20100101 Firefox/41.0',
"Referer": "http://www.zhihu.com/",
'Host': 'www.zhihu.com',
}
data = {
'email': 'xxxxxx@gmail.com',
'password': 'xxxxxxxxx',
'rememberme': "true",
}
s = requests.session()
if os.path.exists('cookiefile'):
with open('cookiefile') as f:
cookie = json.load(f)
s.cookies.update(cookie)
req1 = s.get(url, headers=headers)
# 建立一个zhihu.html文件,用于验证是否登陆成功
with open('zhihu.html', 'w') as f:
f.write(req1.content)
else:
req = s.get(url, headers=headers)
print req
soup = BeautifulSoup(req.text, "html.parser")
xsrf = soup.find('input', {'name': '_xsrf', 'type': 'hidden'}).get('value')
data['_xsrf'] = xsrf
timestamp = int(time.time() * 1000)
captchaURL = 'http://www.zhihu.com/captcha.gif?=' + str(timestamp)
print captchaURL
with open('zhihucaptcha.gif', 'wb') as f:
captchaREQ = s.get(captchaURL)
f.write(captchaREQ.content)
loginCaptcha = raw_input('input captcha:\n').strip()
data['captcha'] = loginCaptcha
print data
loginREQ = s.post(loginURL, headers=headers, data=data)
if not loginREQ.json()['r']:
print s.cookies.get_dict()
with open('cookiefile', 'wb') as f:
json.dump(s.cookies.get_dict(), f)
else:
print 'login fail'

python抓取网易云音乐最新评论和评论总数

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import json
import os
import base64
from Crypto.Cipher import AES
from pprint import pprint


def aesEncrypt(text, secKey):
    """AES-CBC encrypt ``text`` with ``secKey`` and return it base64-encoded.

    Pads with PKCS#7-style padding (pad byte == pad length) and uses the
    fixed IV '0102030405060708' that the NetEase web client uses.
    """
    pad = 16 - len(text) % 16
    text = text + pad * chr(pad)
    # AES.MODE_CBC instead of the magic constant 2 (same value, readable).
    encryptor = AES.new(secKey, AES.MODE_CBC, '0102030405060708')
    ciphertext = encryptor.encrypt(text)
    return base64.b64encode(ciphertext)


def rsaEncrypt(text, pubKey, modulus):
    """Textbook-RSA-encrypt ``text`` (no padding) the way NetEase's client does.

    The text is reversed, read as a big-endian hex integer, and raised to
    ``pubKey`` modulo ``modulus`` (both hex strings).  Returns the result as
    a 256-digit zero-padded lowercase hex string.
    """
    import binascii  # local import keeps this blog snippet self-contained
    reversed_text = text[::-1]
    if not isinstance(reversed_text, bytes):
        # Python 3 str needs encoding before hexlify; Python 2 str is bytes.
        reversed_text = reversed_text.encode('utf-8')
    # pow(base, exp, mod) performs modular exponentiation directly; the
    # original `base ** exp % mod` materialized a multi-megabyte integer
    # (exponent 0x10001) before reducing, and str.encode('hex') is py2-only.
    rs = pow(int(binascii.hexlify(reversed_text), 16),
             int(pubKey, 16), int(modulus, 16))
    return format(rs, 'x').zfill(256)


def createSecretKey(size):
    """Return a 16-char pseudo-random lowercase-hex key from ``size`` random bytes.

    Mirrors the NetEase JS client: each byte is hex-formatted WITHOUT zero
    padding (a byte < 16 contributes a single char), then the concatenation
    is truncated to 16 chars.  With size >= 16 the result is always 16 chars.
    """
    # bytearray yields ints on both Python 2 and 3; the original
    # hex(ord(xx)) only worked on Python 2 byte-strings.
    random_bytes = bytearray(os.urandom(size))
    return ''.join(format(b, 'x') for b in random_bytes)[0:16]


url = 'http://music.163.com/weapi/v1/resource/comments/R_SO_4_30953009/?csrf_token='
headers = {
'Cookie': 'appver=1.5.0.75771;',
'Referer': 'http://music.163.com/'
}
text = {
'username': '邮箱',
'password': '密码',
'rememberLogin': 'true'
}

#下面三个参数是通用的
modulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'
nonce = '0CoJUm6Qyw8W8jud'
pubKey = '010001'

text = json.dumps(text)
secKey = createSecretKey(16)
encText = aesEncrypt(aesEncrypt(text, nonce), secKey)
encSecKey = rsaEncrypt(secKey, pubKey, modulus)
data = {
'params': encText,
'encSecKey': encSecKey
}

req = requests.post(url, headers=headers, data=data)
pprint(req.json()) #输出格式化的json数据

#最新评论
for content in req.json()['comments']:
print content['content'].encode('utf-8')
print

print req.json()['total']

其中 Crypto需要用pip安装

1
sudo pip install pycrypto

结果是

参考来源:

网易云音乐新登录API分析

网易云音乐常用API浅析

网易API

个人爬虫常用的几个知识点

1 最常用beautifulsoup4的几个语法

1
2
3
from bs4 import BeautifulSoup

soup = BeautifulSoup(r.text)

如果搜索很多标签用

1
soup.find_all('div', {'class':True, 'id': True, 'title': True}),

然用用for遍历,

搜索单个标签用

1
soup.find('div', {'class':True, 'id': True, 'title': True} )

find和find_all可以多次套用.

获取内容用

1
item.get_text().encode('utf-8'), cmd显示的话用'gbk'

获取div标签的其他属性用

1
item.get('href')

生成所有子标签的列表(list)用contents属性(注意它是属性不是函数,不加括号),

1
childtag = soup.contents

常用作输出部分子标签的内容. 比如

1
childtag[0].get_text(), childtag[-1].get_text()

2 python中生成13位时间戳的方法

1
2
3
import time

print str(int(time.time() * 1000))

3 requests的req.encoding如果没有获取到正确的编码

可以用以下方法解决

1
req.encoding = 'gb2312'

或者

1
req.encoding = 'utf-8'

或者

1
req.encoding = req.apparent_encoding

4 模拟登录的时候需要用requests的session()函数.

会话对象让你能够跨请求保持某些参数。它也会在同一个Session实例发出的所有请求之间保持cookies。

1
s = requests.session()

python实现虎扑(hoopchina)自动登录和点亮

1:自动登录的代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import time
import sys
from bs4 import BeautifulSoup

# 登录的头部信息
my_headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0',
'Host': 'passport.hupu.com',
'Referer': 'http://passport.hupu.com/pc/login?project=bbs&from=pc',
# 'Cookie ': "_cnzz_CV30020080=buzi_cookie%7Cdc42405e.675b.386f.8396.e1e33e42953b%7C-1; __gads=ID=15c18186521000ee:T=1443103052:S=ALNI_MZ84dig34sQNsPR7xvUlQlQ2H7pFg; _dacevid3=dc42405e.675b.386f.8396.e1e33e42953b; vjuids=8ee89313a.14fffb0fe83.0.32078b2e; vjlast=1443103834.1443103834.30; __dacewap=0xc8e6088248ba6cca; _HUPUSSOID=16672149-8304-4af5-8a1f-924337794477; _CLT=918ebe7bb324d8673460f7af1d701a5c; __dacevst=ce2e6fb5.b67f434d|1443110107591; CNZZDATA30020080=cnzz_eid%3D1429176686-1443106302-http%253A%252F%252Fbbs.hupu.com%252F%26ntime%3D1443106302; _cnzz_CV30020080=buzi_cookie%7Cdc42405e.675b.386f.8396.e1e33e42953b%7C-1"

}
s = requests.session()

# 用户名和密码的post信息
data = {
'username': '用户名',
'password': '密码',
}
time.sleep(2)
# 验证码
verifyimg_url = 'http://passport.hupu.com/pc/verifyimg'

f = open('img.jpg', 'wb')
imgreq = requests.get(verifyimg_url)
f.write(imgreq.content)
f.close()

# 验证码目前需要手动输入, 没有找到很精确辨识验证码的库.
verifyimg = raw_input('verifyimg code:\n').strip()
data['verifyCode'] = verifyimg


loginURL = 'http://passport.hupu.com/pc/login/member.action'
try:
reqlogin = s.post(loginURL, data=data, headers=my_headers)
print reqlogin.json()
uid = str(reqlogin.json()['msg']['uid'])
tag = str(reqlogin.json()['msg']['tag'])
except Exception as e:
print e
sys.exit(1)


url = 'http://passport.hupu.com/m/2/login/crossdomain?uid=' + \
uid + '&freeLogin=true&tag=' + tag
# print url

req = s.get(url, headers=my_headers)
req = s.get('http://passport.hupu.com/pc/redirectJumpUrl', headers=my_headers)

cookies = reqlogin.cookies
time.sleep(2)

# 这个头部信息跟登录时候不一样,不能用那个的.
liangle_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0',
'Host': 'bbs.hupu.com',
'Referer': 'http://bbs.hupu.com'
}

# 用小黑屋测试, 这个版块每有相应有权限的账号是没法参观的.
banzhu_URL = 'http://bbs.hupu.com/66'

banzhu_req = requests.get(banzhu_URL, headers=liangle_headers, cookies=cookies)
f = open('hupu.html', 'w')
f.write(banzhu_req.content)


banzhu_req.encoding = 'GB2312'
soup = BeautifulSoup(banzhu_req.text, "html.parser")


tiezi_lists = soup.find('table', id='pl').find_all('td', class_='p_title')
for tiezi in tiezi_lists:
print " ".join(tiezi.get_text().split()).encode('utf-8')

今天在v2ex上看到一个大神说,如果遇到验证码登录的网站,一般是现在chrome里面手动登陆一次,然后把cookie拷贝出来,放到头文件里面.
我试了一下果然可以,比需要手动输入方便多了.不过这种方法有个缺陷就是不能短时间内登录多次.

2: 自动点亮的代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import time
import sys

# 登录的头部信息
my_headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0',
'Host': 'passport.hupu.com',
'Referer': 'http://passport.hupu.com/pc/login?project=bbs&from=pc'
}

s = requests.session()

# 用户名和密码的post信息
data = {
'username': '',
'password': '',
}
time.sleep(2)
# 验证码
verifyimg_url = 'http://passport.hupu.com/pc/verifyimg'

f = open('img.jpg', 'wb')
imgreq = requests.get(verifyimg_url)
f.write(imgreq.content)
f.close()

# 验证码目前需要手动输入, 没有找到很精确辨识验证码的库.
verifyimg = raw_input('verifyimg code:\n').strip()
data['verifyCode'] = verifyimg


loginURL = 'http://passport.hupu.com/pc/login/member.action'
try:
reqlogin = s.post(loginURL, data=data, headers=my_headers)
print reqlogin.json()['code']
# uid = str(reqlogin.json()['msg']['uid'])
# tag = str(reqlogin.json()['msg']['tag'])
except Exception as e:
print e
sys.exit(1)


# url = 'http://passport.hupu.com/m/2/login/crossdomain?uid=' + \
# uid + '&freeLogin=true&tag=' + tag
# # print url

# req = s.get(url, headers=my_headers)
# req = s.get('http://passport.hupu.com/pc/redirectJumpUrl', headers=my_headers)

cookies = reqlogin.cookies
time.sleep(2)

# 这个头部信息跟登录时候不一样,不能用那个的.
liangle_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0',
'Host': 'bbs.hupu.com',
'Referer': 'http://bbs.hupu.com'
}

# authorid是被点亮那个回帖人的数字id, fid是板块数字id, pid此回帖人的第多少个帖子, tid是这个主贴的数字id
# 这些数据可以用bs4在网页中得到.
liangle_data = {
'authorid': '16920413',
'fid': '3913',
'pid': '6103',
'state': '1',
'tid': '12615933',
'token': '2e018203ea6a482c17847289989cf66f',
}
liangle_req = s.post('http://bbs.hupu.com/ajax/lights.ajax.php',
data=liangle_data, headers=liangle_headers, cookies=cookies)

print liangle_req.content


code是1表示点亮成功, num是被点亮的次数..

Mac10.10系统下用python抓取淘宝时出现sslv3 alert handshake failure (_ssl.c:590)的解决办法

用python的requests库模拟抓取淘宝页面时会出现如下错误,

1
requests.exceptions.SSLError: [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:590)

换成自带的urllib也是一样报错,
但是在windows系统下同样的代码正常,没有任何错误.

开始以为是openssl的问题, 把系统自带的openssl删除,然后用brew安装最新版本的openssl也不行.

根据stackoverflow大神的帖子,我试了很多方式,最终找到一个解决这个错误的方法.

在python中加入以下语句即可.

1
2
import requests.packages.urllib3.util.ssl_
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS = 'ALL'

参考链接 : Stack Overflow

sublime text3 侧边栏字体的修改方法

正常情况下sublime text3的侧边栏字体很小,不仔细看的话,很难看清楚.

需要把字号修改的大一些,
网上搜索说,要下载一个插件然后打开主题的配置文件.我找到一个更简单的方法,

  • 在sublime text3的属性里面找到Browse packages,打开插件所在文件夹,进入主题目录,找到要修改的配置文件,拖到sublime text中.

  • 搜索”sidebar_label”, 在后面添加上

1
"font.size": 16,

windows系统下的话, 再加一句字体修改

1
"font.face": "courier",
  • 如果觉得行之间空间太挤,可以通过搜索”sidebar_tree”,将padding[8, 3]后面的数值改成5或者6.
    同一个class下面的”indent”是文件名的缩进, 也可以根据自己的情况修改一下.最后这样,感觉好多了.
  • Copyrights © 2015-2022 小信
  • Visitors: | Views:

请我喝杯咖啡吧~