J
J
jecer_inside2021-08-10 21:47:28
Python
jecer_inside, 2021-08-10 21:47:28

How to skip empty element when parsing?

when parsing a directory, there are categories where there are no subcategories
, and in this case an error pops up:

D:\cascsacsa\venv\Scripts\python.exe D:/cascsacsa/qwe.py
page: 1
  product: https://autostol63.ru/vesta/standartnoe-vesta/kolpachok-pylnik-silikonovyj-vyklyuchatelya-dveri-lada-granta----kalina----priora----vesta----datsun.html
  product: https://autostol63.ru/kalina/kalinatun/uplotnitel-vertikalnyj-rki-19.html
  product: https://autostol63.ru/avtokrepezh/zaglushka-samoreza-chernaya-komplekt-shajba-s-kryshkoj.html
Traceback (most recent call last):
  File "D:\cascsacsa\qwe.py", line 130, in <module>
    main()
  File "D:\cascsacsa\qwe.py", line 124, in main
    data = parse_products(urls)
  File "D:\cascsacsa\qwe.py", line 101, in parse_products
    category2 = soup.select_one('ul.breadcrumb > li:nth-child(3) > a > span').text.strip()
AttributeError: 'NoneType' object has no attribute 'text'

Process finished with exit code 1

how to make a skip condition if there are no subcategories?
the code itself:
# -*- coding: utf-8 -*-
import json
import xlsxwriter

import requests
from bs4 import BeautifulSoup

PAGES_COUNT = 1
OUT_FILENAME = 'out1.json'
OUT_XLSX_FILENAME = 'out.xlsx'


def dump_to_json(filename, data, **kwargs):
    kwargs.setdefault('ensure_ascii', False)
    kwargs.setdefault('indent', 1)

    with open(OUT_FILENAME, 'w') as f:
        json.dump(data, f, **kwargs)


def dump_to_xlsx(filename, data):
    if not len(data):
        return None

    with xlsxwriter.Workbook(filename) as workbook:
        ws = workbook.add_worksheet()
        bold = workbook.add_format({'bold': True})

        headers = ['Название товара', 'Цена', 'Категория', 'Подкатегория', 'ПодПодкатегория']
        # headers.extend(data[0]['techs'].keys())

        for col, h in enumerate(headers):
            ws.write_string(0, col, h, cell_format=bold)

        for row, item in enumerate(data, start=1):
            ws.write_string(row, 0, item['name'])
            ws.write_string(row, 1, item['amount'])
            ws.write_string(row, 2, item['category'])
            ws.write_string(row, 3, item['category2'])
            ws.write_string(row, 4, item['category3'])
            # for prop_name, prop_value in item['techs'].items():
            #     col = headers.index(prop_name)
            #     ws.write_string(row, col, prop_value)


def get_soup(url, **kwargs):
    response = requests.get(url, **kwargs)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, features='html.parser')
    else:
        soup = None
    return soup


def crawl_products(pages_count):
    urls = []
    fmt = 'https://autostol63.ru/granta/grantatun/?page={page}'

    for page_n in range(1, 1 + pages_count):
        print('page: {}'.format(page_n))

        page_url = fmt.format(page=page_n)
        soup = get_soup(page_url)
        if soup is None:
            break

        for tag in soup.select('div.caption > a'):
            href = tag.attrs['href']
            url = '{}'.format(href)
            urls.append(url)

    return urls



def parse_products(urls):

    data = []

    for url in urls:
        print('\tproduct: {}'.format(url))

        soup = get_soup(url)
        if soup is None:
            break

        name = soup.select_one('h1 > span').text.strip()
        amount = soup.select_one('span.autocalc-product-price').text.strip()
        category = soup.select_one('ul.breadcrumb > li:nth-child(2) > a > span').text.strip()
        category2 = soup.select_one('ul.breadcrumb > li:nth-child(3) > a > span').text.strip()
        category3 = soup.select_one('ul.breadcrumb > li:nth-child(4)').text.strip()

        # techs = {}
        # for row in soup.select('div.attribute > div'):
        #     cols = row.select('span')
        #     cols = [c.text.strip() for c in cols]
        #     techs[cols[0]] = cols[1]

        item = {
            'name': name,
            'amount': amount,
            'category': category,
            'category2': category2,
            'category3': category3,
        }
        data.append(item)

    return data


def main():
    urls = crawl_products(PAGES_COUNT)
    data = parse_products(urls)
    dump_to_json(OUT_FILENAME, data)
    dump_to_xlsx(OUT_XLSX_FILENAME, data)


if __name__ == '__main__':
    main()


debugger shows nothing, only shows the same errors

Answer the question

In order to leave comments, you need to log in

1 answer(s)
J
jecer_inside, 2021-08-11
@jecer_inside

decision

def get_text(el) -> str:
            if not el:
                return ""

            return el.get_text(strip=True)
        category2 = get_text(soup.select_one('ul.breadcrumb > li:nth-child(3) > a > span'))

Didn't find what you were looking for?

Ask your question

Ask a Question

731 491 924 answers to any question