Python Web Crawler: Scraping Bank-Related News from 中證網 (cs.com.cn)

Source: https://www.cnblogs.com/HanaKoo/archive/2022/03/28/16066184.html


Final version: 07_中證網(Plus -Pro).py. For one hardcoded query it loops over a preset list of years with per-year page counts, pulls each result's summary text straight out of the search-results table, and writes one .txt file per article.

# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys
import os

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change the default encoding of standard output

for qq in range(8):
    # query = input("【中證網】請輸入你想搜索的內容:")
    query = '蘇州銀行'

    # years to crawl and the matching number of search-result pages per year
    years = [2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021]
    pages_per_year = [2, 1, 1, 1, 11, 1, 19, 7]

    year = years[qq]
    pages = pages_per_year[qq]

    if not os.path.isdir(f'D:/桌面/爬蟲-銀行/中國證券網/{query}'):  # if this folder does not exist
        os.mkdir(f'D:/桌面/爬蟲-銀行/中國證券網/{query}')  # create it

    m = 0
    for p in range(1, pages + 1):
        url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline=={year}'

        dic = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}

        resp = requests.get(url, headers=dic, )
        resp.encoding = 'utf-8'
        # print(resp)

        print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
        print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
        print(f'\n>>>--------------------第{p}頁---------------------<<<\n')

        # print(resp.text)
        page = BeautifulSoup(resp.text, "html.parser")  # specify the HTML parser

        alist = page.find_all("table")
        datalist = []
        for ii in alist:
            # each result's summary sits in a <td> with this exact inline style
            ss = ii.find('td', style='font-size: 12px;line-height: 24px;color: #333333;margin-top: 4px;')
            # print('ss=\n\n',ss)
            if ss is not None:
                ss = ss.get_text()
                datalist.append(ss)

        # print('data:',datalist,len(datalist))

        if not os.path.isdir(f'D:/桌面/爬蟲-銀行/中國證券網/{query}/{year}'):  # if this folder does not exist
            os.mkdir(f'D:/桌面/爬蟲-銀行/中國證券網/{query}/{year}')  # create it

        for ii in range(len(datalist)):
            fp = open(f'D:/桌面/爬蟲-銀行/中國證券網/{query}/{year}/({year}){ii + m + 1}.txt', 'w+', encoding='utf-8')
            fp.write(datalist[ii] + '\n')  # text only
            print(datalist[ii])
            print(f'\n> > >{year}年,第{p}頁,第{ii + 1}篇,成功! < < <')
            fp.close()
        m = m + len(datalist) + 1  # note: the extra +1 leaves a one-number gap in the file names between pages

print('----------------------------')
print(f'------\n{year}年,爬取完畢----')
print('----------------------------')
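
The long hand-assembled query string above is easy to mistype. As a hedged aside (not code from the original post), the same URL can be built with urllib.parse.urlencode; the parameter names are copied from the script, and safe="=" preserves the site's unusual timeline==<year> form:

# Minimal sketch, assuming cs.com.cn accepts a standard-encoded query string.
from urllib.parse import urlencode

def build_search_url(query: str, year: int, page: int) -> str:
    params = {
        "page": page,
        "channelid": 215308,
        "searchword": query,
        "keyword": query,
        "token": "12.1462412070719.47",
        "perpage": 10,
        "outlinepage": 5,
        "timeline": f"={year}",  # the original URL reads "timeline==<year>"
    }
    # safe="=" stops urlencode from percent-encoding the leading "=" above
    return "http://search.cs.com.cn/search?" + urlencode(params, safe="=")

print(build_search_url("蘇州銀行", 2021, 1))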

Optimization history: 01_中證網.py. The first working version: it scrapes one search-results page, follows every https link in the results table, and saves each article's <section> paragraphs to numbered .txt files. Its multi-page branch still re-requests the page-1 URL (see the BUG comment below).

# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change the default encoding of standard output


query = input("【中證網】請輸入你想搜索的內容:")
pages = int(input("要爬取的頁數(不小於1):"))
if pages < 1:
    exit()

url = f'http://search.cs.com.cn/search?channelid=215308&perpage=&templet=&token=12.1462412070719.47&searchword={query}'

dic = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                  "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}

resp = requests.get(url, headers=dic, )
resp.encoding = 'utf-8'
# print(resp)

# print(resp.text)
page = BeautifulSoup(resp.text, "html.parser")  # specify the HTML parser

alist = page.find("table").find_all("a")

# print(alist)

weblist = []
for a in alist:
    if a.get('href')[:5] == "https":  # keep only absolute https links
        weblist.append(a.get('href'))

# ---------------- each article on this page ----------------
m = 0

for ii in range(len(weblist)):

    url_a = weblist[ii]

    # print('0=',url_a)

    dic_a = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                      "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}

    resp_a = requests.get(url_a, headers=dic_a, )
    resp_a.encoding = 'gbk'

    # print('New:\n',resp_a.text)

    page_a = BeautifulSoup(resp_a.text, "html.parser")  # specify the HTML parser

    # print('123:\n',page_a)

    page_b = page_a.find('section').find_all('p')

    # print(page_b)
    fp = open(f'D:/桌面/爬蟲-銀行/中國證券網/中國銀行/{ii+1}.txt', 'w+', encoding='utf-8')

    txt_list = []
    for txt_a in page_b:
        # print(txt_a.text)
        txt_list.append(txt_a.text)

    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # ++++++++++++++++++++++ write text to file ++++++++++++++++++++++

    for i in range(len(txt_list)):
        fp.write(txt_list[i] + '\n')  # text only

    fp.close()
    print(f'>>{ii+1}成功!')
    m = ii+1

# +-+++-----------++++++++++----- multiple pages ------++++++++++++----------++++

if pages > 1:
    for p in range(pages):
        url_s = f"http://search.cs.com.cn/search?page={p+1}&channelid=215308&searchword={query}"

        resp = requests.get(url, headers=dic, )  # BUG: uses url (page 1) instead of url_s, so every pass re-downloads the first results page
        resp.encoding = 'utf-8'
        # print(resp)

        # print(resp.text)
        page = BeautifulSoup(resp.text, "html.parser")  # specify the HTML parser

        alist = page.find("table").find_all("a")

        # print(alist)

        weblist = []
        for a in alist:
            if a.get('href')[:5] == "https":
                weblist.append(a.get('href'))

        # ---------------- each article on this page ----------------

        for ii in range(len(weblist)):

            url_a = weblist[ii]

            # print('0=',url_a)

            dic_a = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                              "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}

            resp_a = requests.get(url_a, headers=dic_a, )
            resp_a.encoding = 'gbk'

            # print('New:\n',resp_a.text)

            page_a = BeautifulSoup(resp_a.text, "html.parser")  # specify the HTML parser

            # print('123:\n',page_a)

            page_b = page_a.find('section').find_all('p')

            # print(page_b)
            fp = open(f'D:/桌面/爬蟲-銀行/中國證券網/中國銀行/{ii + 1 + m}.txt', 'w+', encoding='utf-8')

            txt_list = []
            for txt_a in page_b:
                # print(txt_a.text)
                txt_list.append(txt_a.text)

            # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
            # ++++++++++++++++++++++ write text to file ++++++++++++++++++++++

            for i in range(len(txt_list)):
                fp.write(txt_list[i] + '\n')  # text only

            print(f'>>{ii + 1 + m}成功!')
            m = m + ii + 1  # note: this runs once per article, so the file numbering jumps ahead each time; 02 moves it out of the inner loop


fp.close()  # closes only the last handle; the files opened in the multi-page loop above are never explicitly closed

print('---------------\n>>>爬取完畢<<<')
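
Versions 01 and 02 paste the per-article download-and-parse block twice: once for the first page and once in the multi-page branch. As a hedged refactoring sketch (not code from the original post), the duplicated block could be factored into one helper:

# Minimal sketch, assuming the article-page structure the original relies on.
import requests
from bs4 import BeautifulSoup

UA = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}

def fetch_article_paragraphs(url):
    """Return the text of every <p> inside the article's <section> tag."""
    resp = requests.get(url, headers=UA, timeout=10)
    resp.encoding = 'gbk'  # article pages are GBK-encoded, per the original
    soup = BeautifulSoup(resp.text, "html.parser")
    section = soup.find('section')
    if section is None:  # guard: not every article layout has a <section>
        return []
    return [p.get_text() for p in section.find_all('p')]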

Optimization history: 02_中證網.py. Same structure as 01 (including the url-vs-url_s bug), but it adds page=1 to the first request, writes files under a /0/ subfolder with a (2021) prefix, and moves the counter update out of the inner article loop.

# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change the default encoding of standard output


query = input("【中證網】請輸入你想搜索的內容:")
pages = int(input("要爬取的頁數(不小於1):"))
if pages < 1:
    exit()

url = f'http://search.cs.com.cn/search?page=1&channelid=215308&searchword={query}'

dic = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                  "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}

resp = requests.get(url, headers=dic, )
resp.encoding = 'utf-8'
# print(resp)

# print(resp.text)
page = BeautifulSoup(resp.text, "html.parser")  # specify the HTML parser

alist = page.find("table").find_all("a")

# print(alist)

weblist = []
for a in alist:
    if a.get('href')[:5] == "https":
        weblist.append(a.get('href'))

# ---------------- each article on this page ----------------
m = 0

for ii in range(len(weblist)):

    url_a = weblist[ii]

    # print('0=',url_a)

    dic_a = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                      "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}

    resp_a = requests.get(url_a, headers=dic_a, )
    resp_a.encoding = 'gbk'

    # print('New:\n',resp_a.text)

    page_a = BeautifulSoup(resp_a.text, "html.parser")  # specify the HTML parser

    # print('123:\n',page_a)

    page_b = page_a.find('section').find_all('p')

    # print(page_b)
    fp = open(f'D:/桌面/爬蟲-銀行/中國證券網/中國銀行/0/(2021){ii+1}.txt', 'w+', encoding='utf-8')

    txt_list = []
    for txt_a in page_b:
        # print(txt_a.text)
        txt_list.append(txt_a.text)

    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # ++++++++++++++++++++++ write text to file ++++++++++++++++++++++

    for i in range(len(txt_list)):
        fp.write(txt_list[i] + '\n')  # text only

    fp.close()
    print(f'>>{ii+1}成功!')
    m = ii+1

# +-+++-----------++++++++++----- multiple pages ------++++++++++++----------++++

if pages > 1:
    for p in range(pages):
        url_s = f"http://search.cs.com.cn/search?page={p+1}&channelid=215308&searchword={query}"

        resp = requests.get(url, headers=dic, )  # BUG: still uses url instead of url_s, so every pass re-downloads the same page
        resp.encoding = 'utf-8'
        # print(resp)

        # print(resp.text)
        page = BeautifulSoup(resp.text, "html.parser")  # specify the HTML parser

        alist = page.find("table").find_all("a")

        # print(alist)

        weblist = []
        for a in alist:
            if a.get('href')[:5] == "https":
                weblist.append(a.get('href'))

        # ---------------- each article on this page ----------------

        for ii in range(len(weblist)):

            url_a = weblist[ii]

            # print('0=',url_a)

            dic_a = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                              "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}

            resp_a = requests.get(url_a, headers=dic_a, )
            resp_a.encoding = 'gbk'

            # print('New:\n',resp_a.text)

            page_a = BeautifulSoup(resp_a.text, "html.parser")  # specify the HTML parser

            # print('123:\n',page_a)

            page_b = page_a.find('section').find_all('p')

            # print(page_b)
            fp = open(f'D:/桌面/爬蟲-銀行/中國證券網/中國銀行/0/(2021){ii + 1 + m}.txt', 'w+', encoding='utf-8')

            txt_list = []
            for txt_a in page_b:
                # print(txt_a.text)
                txt_list.append(txt_a.text)

            # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
            # ++++++++++++++++++++++ write text to file ++++++++++++++++++++++

            for i in range(len(txt_list)):
                fp.write(txt_list[i] + '\n')  # text only

            print(f'>>{ii + 1 + m}成功!')
        m = m + ii + 1


fp.close()  # closes only the last handle; the files opened in the loop above are never explicitly closed

print('---------------\n>>>爬取完畢<<<')

Optimization history: 03_中證網.py. Restructured into a single loop over pages (page={p}, timeline==2021), which fixes the repeated-page bug; article pages are still fetched and parsed one by one.

# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change the default encoding of standard output

query = input("【中證網】請輸入你想搜索的內容:")
pages = int(input("要爬取的頁數(不小於1):"))
if pages < 1:
    exit()

m = 0
for p in range(1, pages + 1):
    url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline==2021'

    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}

    resp = requests.get(url, headers=dic, )
    resp.encoding = 'utf-8'
    # print(resp)

    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')

    # print(resp.text)
    page = BeautifulSoup(resp.text, "html.parser")  # specify the HTML parser

    alist = page.find("table").find_all('a')

    weblist = []

    for a in alist:
        if a.get('href')[:5] == "https":
            weblist.append(a.get('href'))
    # print('weblist==',weblist)
# ---------------- each article on this page ----------------

    for ii in range(len(weblist)):

        url_a = weblist[ii]

        # print('0=',url_a)

        dic_a = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}

        resp_a = requests.get(url_a, headers=dic_a, )
        resp_a.encoding = 'gbk'

        # print('New:\n',resp_a.text)

        page_a = BeautifulSoup(resp_a.text, "html.parser")  # specify the HTML parser

        # print('123:\n',page_a)

        page_b = page_a.find('section').find_all('p')

        # print(page_b)
        fp = open(f'D:/桌面/爬蟲-銀行/中國證券網/中國銀行/2021/(2021){ii+m+1}.txt', 'w+', encoding='utf-8')

        txt_list = []
        for txt_a in page_b:
            # print('txt_a===',txt_a.text)
            txt_list.append(txt_a.text)
        print(f'\n-++++++++++++++++++第{ii+1}篇文章++++++++++++++++-\n',txt_list,len(txt_list))
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # ++++++++++++++++++++++ write text to file ++++++++++++++++++++++

        for i in range(len(txt_list)):
            fp.write(txt_list[i] + '\n')  # text only

        # print('-----------------------------------')
        print(f'\n> > >{ii+1}成功! < < <')
        fp.close()
    m = m + len(weblist) + 1  # note: the extra +1 leaves a one-number gap in the file names between pages


print('---------------\n>>>爬取完畢<<<')

Optimization history: 04_中證網(網址篩選問題).py. The "URL filtering problem" version, kept as a record of two bugs: the search URL uses page={pages} instead of page={p}, and the href test slices from index 4 onward, so it effectively never matches and weblist stays empty.

# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change the default encoding of standard output

query = input("【中證網】請輸入你想搜索的內容:")
pages = int(input("要爬取的頁數(不小於1):"))
if pages < 1:
    exit()

m = 0
for p in range(1, pages + 1):
    url = f'http://search.cs.com.cn/search?page={pages}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline==2020'  # BUG: page={pages} should be page={p}

    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}

    resp = requests.get(url, headers=dic, )
    resp.encoding = 'utf-8'
    # print(resp)

    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')

    # print(resp.text)
    page = BeautifulSoup(resp.text, "html.parser")  # specify the HTML parser

    alist = page.find("table").find_all('a')

    print('alist:',alist)

    weblist = []

    for a in alist:
        if a.get('href')[4:] == "http":  # BUG: slicing from index 4 onward can never equal "http", so nothing is collected (05 fixes this with [:4])
            weblist.append(a.get('href'))

    print('weblist==',weblist)

# ---------------- each article on this page ----------------

    for ii in range(len(weblist)):

        url_a = weblist[ii]

        # print('0=',url_a)

        dic_a = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}

        resp_a = requests.get(url_a, headers=dic_a, )
        resp_a.encoding = 'gbk'

        # print('New:\n',resp_a.text)

        page_a = BeautifulSoup(resp_a.text, "html.parser")  # specify the HTML parser

        # print('123:\n',page_a)

        page_b = page_a.find('section').find_all('p')

        # print(page_b)
        fp = open(f'D:/桌面/爬蟲-銀行/中國證券網/中國銀行/2020/(2020){ii+m+1}.txt', 'w+', encoding='utf-8')

        txt_list = []
        for txt_a in page_b:
            # print('txt_a===',txt_a.text)
            txt_list.append(txt_a.text)
        print(f'\n-++++++++++++++++++第{ii+1}篇文章++++++++++++++++-\n',txt_list,len(txt_list))
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # ++++++++++++++++++++++ write text to file ++++++++++++++++++++++

        for i in range(len(txt_list)):
            fp.write(txt_list[i] + '\n')  # text only

        # print('-----------------------------------')
        print(f'\n> > >{ii+1}成功! < < <')
        fp.close()
    m = m + len(weblist) + 1


print('---------------\n>>>爬取完畢<<<')
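
The broken slice above is why version 05 compares the first four characters instead. As a hedged aside (not code from the original post), Python's str.startswith avoids the slice arithmetic entirely:

# Tiny self-contained illustration with made-up hrefs; only absolute links survive.
hrefs = ["http://www.cs.com.cn/xwzx/a.html", "/relative/path", "javascript:void(0)"]
weblist = [h for h in hrefs if h and h.startswith("http")]
print(weblist)  # ['http://www.cs.com.cn/xwzx/a.html']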

Optimization history: 05_中證網.py. Fixes the link filter ([:4] == "http"), narrows the link search to the first <tr> of the results table, prompts for the year, and parses every <p> tag instead of requiring a <section>.

# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change the default encoding of standard output

query = input("【中證網】請輸入你想搜索的內容:")
year = int(input('要爬取的年份:'))
pages = int(input("要爬取的頁數(不小於1):"))

if pages < 1:
    exit()

m = 0
for p in range(1, pages + 1):
    url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline=={year}'

    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}

    resp = requests.get(url, headers=dic, )
    resp.encoding = 'utf-8'
    # print(resp)

    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')

    # print(resp.text)
    page = BeautifulSoup(resp.text, "html.parser")  # specify the HTML parser

    alist = page.find("table").find('tr').find_all('a')

    # print('alist:', alist)

    weblist = []

    for a in alist:
        if a.get('href')[:4] == "http":
            weblist.append(a.get('href'))

    print('weblist==', weblist)

    # ---------------- each article on this page ----------------

    for ii in range(len(weblist)):

        url_a = weblist[ii]

        # print('0=',url_a)

        dic_a = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}

        resp_a = requests.get(url_a, headers=dic_a, )
        resp_a.encoding = 'gbk'

        # print('New:\n',resp_a.text)

        page_a = BeautifulSoup(resp_a.text, "html.parser")  # specify the HTML parser

        # print('123:\n',page_a)

        page_b = page_a.find_all('p')

        # print(page_b)
        fp = open(f'D:/桌面/爬蟲-銀行/中國證券網/中國銀行/{year}/({year}){ii + m + 1}.txt', 'w+', encoding='utf-8')

        txt_list = []
        for txt_a in page_b:
            # print('txt_a===',txt_a.text)
            txt_list.append(txt_a.text)
        print(f'\n-++++++++++++++++++第{ii + 1}篇文章++++++++++++++++-\n', txt_list, len(txt_list))
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # ++++++++++++++++++++++ write text to file ++++++++++++++++++++++

        for i in range(len(txt_list)):
            fp.write(txt_list[i] + '\n')  # text only

        # print('-----------------------------------')
        print(f'\n> > >{ii + 1}成功! < < <')
        fp.close()
    m = m + len(weblist) + 1

print('---------------\n>>>爬取完畢<<<')
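
Version 06 below stops visiting the article pages and instead reads each result's summary from a <td> matched by its exact inline style string. A hedged aside (not from the post): BeautifulSoup also accepts a callable as the attribute filter, which keeps working if the site tweaks the style slightly:

# Self-contained sketch: substring match on the style attribute.
from bs4 import BeautifulSoup

html = '<table><tr><td style="font-size: 12px;line-height: 24px;">summary text</td></tr></table>'
soup = BeautifulSoup(html, "html.parser")
# match any <td> whose inline style merely contains the font-size rule
td = soup.find("td", style=lambda s: s and "font-size: 12px" in s)
print(td.get_text() if td else None)  # -> summary text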

Optimization history: 06_中證網(Plus).py. Changes strategy: instead of following each article link, it extracts the summary text directly from the styled <td> in the search results and creates the output folder if needed; the query is hardcoded.

# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys
import os

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change the default encoding of standard output

# query = input("【中證網】請輸入你想搜索的內容:")
query = '交通銀行'
year = int(input('要爬取的年份:'))
pages = int(input("要爬取的頁數(不小於1):"))

if pages < 1:
    exit()

m = 0
for p in range(1, pages + 1):
    url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline=={year}'

    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}

    resp = requests.get(url, headers=dic, )
    resp.encoding = 'utf-8'
    # print(resp)

    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')
    print(f'\n>>>--------------------第{p}頁---------------------<<<\n')

    # print(resp.text)
    page = BeautifulSoup(resp.text, "html.parser")  # specify the HTML parser

    alist = page.find_all("table")
    datalist = []
    for ii in alist:
        # each result's summary sits in a <td> with this exact inline style
        ss = ii.find('td', style='font-size: 12px;line-height: 24px;color: #333333;margin-top: 4px;')
        # print('ss=\n\n',ss)
        if ss is not None:
            ss = ss.get_text()
            datalist.append(ss)

    # print('data:',datalist,len(datalist))

    if not os.path.isdir(f'D:/桌面/爬蟲-銀行/中國證券網/{query}/{year}'):  # if this folder does not exist
        os.mkdir(f'D:/桌面/爬蟲-銀行/中國證券網/{query}/{year}')  # create it

    for ii in range(len(datalist)):
        fp = open(f'D:/桌面/爬蟲-銀行/中國證券網/{query}/{year}/({year}){ii + m + 1}.txt', 'w+', encoding='utf-8')
        fp.write(datalist[ii] + '\n')  # text only
        print(datalist[ii])
        print(f'\n> > >第{p}頁,第{ii + 1}篇,成功! < < <')
        fp.close()
    m = m + len(datalist) + 1

print('----------------------------')
print(f'------\n{year}年,爬取完畢----')
print('----------------------------')
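
One last hedged note (not from the post): os.mkdir in versions 06 and 07 fails if the parent folder is missing, which is why 07 creates the query folder first. os.makedirs with exist_ok=True creates the whole chain in one call and makes the isdir checks unnecessary:

import os

# creates the query and year folders together; safe to call repeatedly
os.makedirs('D:/桌面/爬蟲-銀行/中國證券網/交通銀行/2021', exist_ok=True)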
  • 通過以下方式可以高效,並保證數據同步的可靠性 1.API設計 使用RESTful設計,確保API端點明確,並使用適當的HTTP方法(如POST用於創建,PUT用於更新)。 設計清晰的請求和響應模型,以確保客戶端能夠理解預期格式。 2.數據驗證 在伺服器端進行嚴格的數據驗證,確保接收到的數據符合預期格 ...