


On this special day, 520, here are a few Python scripts you can actually put to use.

       quanshan 2019-01-22

A Python 520 confession tool, for the one you love

from turtle import *
from time import sleep

def go_to(x, y):
    up()
    goto(x, y)
    down()


def big_Circle(size):  # draws the large arc of the heart
    speed(1)
    for i in range(150):
        forward(size)
        right(0.3)

def small_Circle(size):  # draws the small arc of the heart
    speed(1)
    for i in range(210):
        forward(size)
        right(0.786)

def line(size):
    speed(1)
    forward(51*size)

def heart(x, y, size):
    go_to(x, y)
    left(150)
    begin_fill()
    line(size)
    big_Circle(size)
    small_Circle(size)
    left(120)
    small_Circle(size)
    big_Circle(size)
    line(size)
    end_fill()

def arrow():
    pensize(10)
    setheading(0)
    go_to(-400, 0)
    left(15)
    forward(150)
    go_to(339, 178)
    forward(150)

def arrowHead():
    pensize(1)
    speed(1)
    color('red', 'red')
    begin_fill()
    left(120)
    forward(20)
    right(150)
    forward(35)
    right(120)
    forward(35)
    right(150)
    forward(20)
    end_fill()


def main():
    pensize(2)
    color('red', 'pink')
    #getscreen().tracer(30, 0) # uncomment to render the figure much faster
    heart(200, 0, 1)          # first heart; the first two arguments set its position, the last one its size
    setheading(0)             # point the pen along the positive x-axis
    heart(-80, -100, 1.5)     # second heart
    arrow()                   # the line that passes through both hearts
    arrowHead()               # the arrowhead
    go_to(400, -300)
    write("author:520Python", move=True, align="left", font=("宋體", 30, "normal"))
    done()

main()
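The commented-out getscreen().tracer(30, 0) line in main() is the hook for speeding up the rendering. Below is a minimal, self-contained sketch of that idea using a plain circle as a stand-in for the hearts (the circle is only an illustration, not part of the original script): animation is turned off, the shape is drawn, and the screen is refreshed once at the end.

from turtle import tracer, update, done, circle, color, begin_fill, end_fill

tracer(0, 0)            # 0 = no automatic screen refreshes while drawing
color('red', 'pink')
begin_fill()
circle(100)             # stand-in for the heart/arrow drawing done by main()
end_fill()
update()                # push the finished picture to the screen in one go
done()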

And if the confession fails! Then...........

Scraping girl-photo galleries with Python

Something to soothe your tender little heart. After all, today everyone is busy showing off, so shut your ears to the world outside and devote yourself to the "sages' books".

#!/usr/bin/env python
# coding=utf-8
import os
import time
import threading
from multiprocessing import Pool, cpu_count

import requests
from bs4 import BeautifulSoup

headers = {
    'X-Requested-With': 'XMLHttpRequest',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/56.0.2924.87 Safari/537.36',
    'Referer': "http://www."
}
dir_path = r"E:\mmjpg"      # directory where downloaded images are saved

def save_pic(pic_src, pic_cnt):
    """ Download one image into the current folder. """
    try:
        img = requests.get(pic_src, headers=headers, timeout=10)
        imgname = "pic_cnt_{}.jpg".format(pic_cnt + 1)
        with open(imgname, 'ab') as f:
            f.write(img.content)
            print(imgname)
    except Exception as e:
        print(e)

def make_dir(folder_name):
    """ Create the folder for a gallery and switch into it. """
    path = os.path.join(dir_path, folder_name)
    # If the directory already exists, the gallery was crawled before and is
    # skipped (deduplication). Returns False if it exists, True otherwise.
    if not os.path.exists(path):
        os.makedirs(path)
        print(path)
        os.chdir(path)
        return True
    print("Folder has existed!")
    return False

def delete_empty_dir(dir):
    """ If the program was interrupted, a folder may exist with no images
    downloaded into it yet; since an existing folder makes the gallery be
    skipped, such empty folders have to be removed here. """
    if os.path.exists(dir):
        if os.path.isdir(dir):
            for d in os.listdir(dir):
                path = os.path.join(dir, d)     # build the path one level down
                if os.path.isdir(path):
                    delete_empty_dir(path)      # recursively delete empty folders
        if not os.listdir(dir):
            os.rmdir(dir)
            print("remove the empty dir: {}".format(dir))
    else:
        print("Please start your performance!")  # reached when dir_path does not exist yet

lock = threading.Lock()     # global resource lock

def urls_crawler(url):
    """ Crawler entry point: the main scraping logic. """
    try:
        r = requests.get(url, headers=headers, timeout=10).text
        # Gallery title, also used as the folder name
        folder_name = BeautifulSoup(r, 'lxml').find('h2').text.encode('ISO-8859-1').decode('utf-8')
        with lock:
            if make_dir(folder_name):
                # Number of images in the gallery
                max_count = BeautifulSoup(r, 'lxml').find('div', class_='page').find_all('a')[-2].get_text()
                # Pages of the gallery
                page_urls = [url + "/" + str(i) for i in range(1, int(max_count) + 1)]
                # Image addresses
                img_urls = []
                for index, page_url in enumerate(page_urls):
                    result = requests.get(page_url, headers=headers, timeout=10).text
                    # The last image has no <a> tag, just an <img>, so it is parsed separately
                    if index + 1 < len(page_urls):
                        img_url = BeautifulSoup(result, 'lxml').find('div', class_='content').find('a').img['src']
                        img_urls.append(img_url)
                    else:
                        img_url = BeautifulSoup(result, 'lxml').find('div', class_='content').find('img')['src']
                        img_urls.append(img_url)

                for cnt, url in enumerate(img_urls):
                    save_pic(url, cnt)
    except Exception as e:
        print(e)

if __name__ == "__main__":
    urls = ['http:///mm/{cnt}'.format(cnt=cnt) for cnt in range(1, 953)]
    pool = Pool(processes=cpu_count())
    try:
        delete_empty_dir(dir_path)
        pool.map(urls_crawler, urls)
    except Exception as e:
        time.sleep(30)
        delete_empty_dir(dir_path)
        pool.map(urls_crawler, urls)
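The folder_name line above relies on a common mojibake fix: when a server does not declare its charset, UTF-8 bytes can end up decoded as ISO-8859-1, and re-encoding then decoding recovers the original text. A minimal sketch of that round trip, with a made-up gallery title standing in for real page data:

# Simulate a title whose UTF-8 bytes were mis-decoded as ISO-8859-1.
original = "美女圖集"                        # hypothetical gallery title
mojibake = original.encode('utf-8').decode('ISO-8859-1')
print(mojibake)                              # prints garbled characters

# The fix used in urls_crawler(): undo the wrong decode, then decode as UTF-8.
recovered = mojibake.encode('ISO-8859-1').decode('utf-8')
print(recovered == original)                 # True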

Scraping a novel with Python

import urllib.request
import re

# 1. fetch the source of the index page
# 2. extract the chapter hyperlinks
# 3. fetch the source of each chapter page
# 4. extract the chapter text
# 5. download / save it (file operations)

# camelCase naming
# fetch the novel content
def getNovertContent():
    # <http.client.HTTPResponse object at 0x000001DFD017F400>
    html = urllib.request.urlopen("http://www./book/0/269").read()
    html = html.decode("gbk")
    # Without the capture groups (parentheses) nothing useful is extracted.
    # In the regular expression, .*? is a non-greedy "match anything".
    reg = r'<li><a href="(.*?)" title=".*?">(.*?)</a></li>'
    # Pre-compiling the pattern makes repeated matching more efficient.
    reg = re.compile(reg)
    urls = re.findall(reg, html)
    # print(urls)
    # A list of tuples such as
    # [(http://www./book/0/269/78850.html, 第一章 山邊小村),
    #  (http://www./book/0/269/78854.html, 第二章 青牛鎮(zhèn))]
    for url in urls:
        # URL of the chapter
        novel_url = url[0]
        # title of the chapter
        novel_title = url[1]

        chapt = urllib.request.urlopen(novel_url).read()
        chapt_html = chapt.decode("gbk")
        # r marks a raw string, so backslashes need no escaping: r"\d" instead of "\\d"
        reg = r'</script>    (.*?)<script type="text/javascript">'
        # re.S lets . match newlines as well, since the chapter text spans several lines
        reg = re.compile(reg, re.S)
        chapt_content = re.findall(reg, chapt_html)
        # print(chapt_content)
        # a list like ["   &nbsp二愣子睜大著雙眼,直直望著茅草和爛泥糊成的<br />"]

        # replace(): first argument is the string to replace, second is its replacement
        chapt_content = chapt_content[0].replace("    ", "")
        # print(chapt_content)    now a plain string: 二愣子睜大著雙眼,直直望著茅草和爛泥糊成的<br />
        chapt_content = chapt_content.replace("<br />", "")

        print("Saving %s" % novel_title)
        # 'w' opens the file for writing text; 'wb' would write bytes
        # f = open("{}.txt".format(novel_title), 'w')
        # f.write(chapt_content)

        with open("{}.txt".format(novel_title), 'w') as f:
            f.write(chapt_content)

        # f.close()

getNovertContent()
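The re.S flag used above is what lets .*? capture a chapter body that contains newlines; by default, . stops at a line break. A minimal sketch of the difference, with a made-up two-line snippet standing in for a chapter page:

import re

html = '</script>    第一行\n第二行<script type="text/javascript">'  # hypothetical page fragment
pattern = r'</script>    (.*?)<script type="text/javascript">'

print(re.findall(pattern, html))          # [] -- without re.S, . does not match '\n'
print(re.findall(pattern, html, re.S))    # ['第一行\n第二行']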

       

But all of that is only consolation for the soul; what we really need is to improve ourselves!

Scraping Zhilian Zhaopin (智聯(lián)招聘) job listings with Python

Land a high-paying job, start the climb to the peak of life, and win over someone rich and beautiful. All the more reason to learn Python well!

#-*- coding: utf-8 -*-
import re
import csv
import requests
from tqdm import tqdm
from urllib.parse import urlencode
from requests.exceptions import RequestException

def get_one_page(city, keyword, region, page):
    '''
    Fetch the HTML of one search-result page and return it.
    '''
    paras = {
        'jl': city,         # city to search in
        'kw': keyword,      # search keyword
        'isadv': 0,         # whether to open the advanced search options
        'isfilter': 1,      # whether to filter the results
        'p': page,          # page number
        're': region        # short for region; 2005 stands for Haidian
    }

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'Host': 'sou.',
        'Referer': 'https://www./',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }

    url = 'https://sou./jobs/searchresult.ashx?' + urlencode(paras)
    try:
        # fetch the page and return its HTML
        response = requests.get(url, headers=headers)
        # use the status code to decide whether the request succeeded
        if response.status_code == 200:
            return response.text
        return None
    except RequestException as e:
        return None

def parse_one_page(html):
    '''
    Parse the HTML, extract the useful fields and yield them.
    '''
    # parse with a regular expression
    pattern = re.compile('<a style=.*? target="_blank">(.*?)</a>.*?'         # job title
        '<td class="gsmc"><a href="(.*?)" target="_blank">(.*?)</a>.*?'      # company website and company name
        '<td class="zwyx">(.*?)</td>', re.S)                                 # monthly salary

    # find every record that matches
    items = re.findall(pattern, html)

    for item in items:
        job_name = item[0]
        job_name = job_name.replace('<b>', '')
        job_name = job_name.replace('</b>', '')
        yield {
            'job': job_name,
            'website': item[1],
            'company': item[2],
            'salary': item[3]
        }

def write_csv_file(path, headers, rows):
    '''
    Write the header and the rows into a csv file.
    '''
    # the encoding prevents errors when writing Chinese text
    # newline='' prevents an extra blank line after every row
    with open(path, 'a', encoding='gb18030', newline='') as f:
        f_csv = csv.DictWriter(f, headers)
        f_csv.writeheader()
        f_csv.writerows(rows)

def write_csv_headers(path, headers):
    '''
    Write the header row.
    '''
    with open(path, 'a', encoding='gb18030', newline='') as f:
        f_csv = csv.DictWriter(f, headers)
        f_csv.writeheader()

def write_csv_rows(path, headers, rows):
    '''
    Write the data rows.
    '''
    with open(path, 'a', encoding='gb18030', newline='') as f:
        f_csv = csv.DictWriter(f, headers)
        f_csv.writerows(rows)

def main(city, keyword, region, pages):
    '''
    Main entry point.
    '''
    filename = 'zl_' + city + '_' + keyword + '.csv'
    headers = ['job', 'website', 'company', 'salary']
    write_csv_headers(filename, headers)
    for i in tqdm(range(pages)):
        # collect every job posting on this page and append it to the csv file
        jobs = []
        html = get_one_page(city, keyword, region, i)
        items = parse_one_page(html)
        for item in items:
            jobs.append(item)
        write_csv_rows(filename, headers, jobs)

if __name__ == '__main__':
    main('北京', 'python工程師', 2005, 10)
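The write_csv_* helpers above lean on two details called out in their comments: opening the file with newline='' so the csv module does not emit a blank line after every row, and csv.DictWriter keyed by the same header list that parse_one_page() yields. A minimal standalone sketch of the same pattern; the file name and the sample row are made up:

import csv

headers = ['job', 'website', 'company', 'salary']
rows = [  # hypothetical row in the same shape parse_one_page() yields
    {'job': 'python工程師', 'website': 'https://example.com', 'company': 'ExampleCo', 'salary': '15001-20000'},
]

# newline='' stops csv from adding an extra blank line per row on Windows;
# gb18030 matches the encoding the scraper uses, so Excel opens the file cleanly.
with open('demo.csv', 'w', encoding='gb18030', newline='') as f:
    writer = csv.DictWriter(f, headers)
    writer.writeheader()
    writer.writerows(rows)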

       
