0
votes

This is the page I'm trying to scrape: https://zh.wikisource.org/wiki/%E8%AE%80%E9%80%9A%E9%91%92%E8%AB%96/%E5%8D%B701

The page is encoded in UTF-8.

Here is my code:

import requests as r
from bs4 import BeautifulSoup as soup
import os
import urllib.request

#make a list of all web pages' urls
webpages=['https://zh.wikisource.org/wiki/%E8%AE%80%E9%80%9A%E9%91%92%E8%AB%96/%E5%8D%B701', 'https://zh.wikisource.org/wiki/%E8%AE%80%E9%80%9A%E9%91%92%E8%AB%96/%E5%8D%B702']

#start looping through all pages

for item in webpages:
    headers = {'User-Agent': 'Mozilla/5.0'}
    data = r.get(item, headers=headers)
    data.encoding = 'utf-8'
    page_soup = soup(data.text, 'html5lib')

    with open(r'sample_srape.txt', 'w') as file:
        file.write(str(page_soup.encode('utf-8')))
        file.close()

The output txt file does not display the Chinese characters at all. The characters are displayed like this: "\xe7\x9a\x84\xe5\x9c\x96\xe6\x9b\xb8\xe9\xa4\xa8".

How can I get the Chinese characters displayed?

2

2 Answers

1
votes

Final working code:

import requests as r
from bs4 import BeautifulSoup as soup

#make a list of all web pages' urls
webpages=['https://zh.wikisource.org/wiki/%E8%AE%80%E9%80%9A%E9%91%92%E8%AB%96/%E5%8D%B701', 'https://zh.wikisource.org/wiki/%E8%AE%80%E9%80%9A%E9%91%92%E8%AB%96/%E5%8D%B702']

#start looping through all pages

for item in webpages:
    headers = {'User-Agent': 'Mozilla/5.0'}
    data = r.get(item, headers=headers)
    data.encoding = 'utf-8'
    page_soup = soup(data.text, 'html5lib')

    with open(r'sample_srape.txt', 'w', encoding='utf-8') as file:
        file.write(page_soup.decode("unicode-escape"))
        file.close()
0
votes

While writing on file use decode("unicode-escape") You will see all Chinese character.

import requests as r
from bs4 import BeautifulSoup as soup

#make a list of all web pages' urls
webpages=['https://zh.wikisource.org/wiki/%E8%AE%80%E9%80%9A%E9%91%92%E8%AB%96/%E5%8D%B701', 'https://zh.wikisource.org/wiki/%E8%AE%80%E9%80%9A%E9%91%92%E8%AB%96/%E5%8D%B702']

#start looping through all pages

for item in webpages:
    headers = {'User-Agent': 'Mozilla/5.0'}
    data = r.get(item, headers=headers)
    data.encoding = 'utf-8'
    page_soup = soup(data.text, 'html5lib')
    #print(page_soup)

    with open(r'sample_srape.txt', 'w') as file:
        file.write(str(page_soup.decode("unicode-escape")))
        file.close()