I have a parser that already works, BUT:
Right now the links to the pages being parsed are inserted into the array by hand. I need to collect those links into the array automatically first, even though I don't know how many there are, how deeply they are nested, and so on; all I know is the URL of the site whose articles I need to parse.
const tress = require('tress');
const needle = require('needle');
const cheerio = require('cheerio');
const async = require('async');
const fs = require('fs');

let aUrl = [
    // A loop that collects all the URLs into this array needs to go here
    'product link',
    'product link',
    'product link',
];

const jquery = body => cheerio.load(body);

let products = [];

// Pull the fields we need out of a loaded product page
let parsePage = ($) => {
    let name = $('#shop-production-view > h1').first().text();
    let categories = $('.breadcrumb').text();
    let price = $('.price').text();
    let content = $('.content_item').html();
    let images = $('.image').find('img').attr('src');

    // let $imageLink = $('.shop-production-view .image a'),
    //     img = '';
    // if ($imageLink.length > 0) {
    //     img = $imageLink.attr('href');
    // }

    products.push({
        name,
        categories,
        content,
        price,
        images,
        // img,
    });
};

// Queue that downloads each URL and parses it, 5 requests in parallel
let q = tress((url, callback) => {
    needle.get(url, {}, (err, res) => {
        if (err) {
            throw err;
        }
        parsePage(jquery(res.body));
        callback();
    });
}, 5);

q.drain = () => {
    console.log(products);
};

for (let i = 0; i < aUrl.length; i++) {
    q.push(aUrl[i]);
}
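Roughly what I have in mind is something like the sketch below, using the same tress/needle/cheerio stack: start from the site's root URL, follow every internal link whatever its depth, and collect the product links into an array before running the parser above. START_URL, the 'a.product-link' selector and the absolutize helper are placeholders of mine, not real values; the actual selectors depend on the site's markup.

const tress = require('tress');
const needle = require('needle');
const cheerio = require('cheerio');
const { URL } = require('url');

const START_URL = 'https://example.com/'; // placeholder: the only URL I actually know
const visited = new Set();                // pages already queued, so we don't loop forever
const productUrls = [];                   // this should end up as aUrl for the parser above

// Resolve a (possibly relative) href against the current page; null if it is not a valid URL
const absolutize = (href, base) => {
    try { return new URL(href, base).href; } catch (e) { return null; }
};

const crawler = tress((url, callback) => {
    needle.get(url, (err, res) => {
        if (err || res.statusCode !== 200) return callback(err);
        const $ = cheerio.load(res.body);

        // collect product links (placeholder selector)
        $('a.product-link[href]').each((i, el) => {
            const link = absolutize($(el).attr('href'), url);
            if (link && !productUrls.includes(link)) productUrls.push(link);
        });

        // follow every internal link, regardless of nesting depth
        $('a[href]').each((i, el) => {
            const link = absolutize($(el).attr('href'), url);
            if (link && link.startsWith(START_URL) && !visited.has(link)) {
                visited.add(link);
                crawler.push(link);
            }
        });

        callback();
    });
}, 5);

crawler.drain = () => {
    console.log('collected', productUrls.length, 'product links');
    // at this point productUrls could be pushed into the parsing queue q from the code above
};

visited.add(START_URL);
crawler.push(START_URL);

The question is whether this is the right way to do it, or whether there is a cleaner way to feed the collected links into the existing queue.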