Easily Archive Your Gumroad Library

So you heard the news: Gumroad has changed its Terms of Service to disallow most types of NSFW content. Like many of you, I was caught completely off guard. This post outlines a quick and easy way to get the direct download links for EVERY asset you own in your Gumroad library.

Before You Start

You should never execute code from a stranger unless you are absolutely sure you understand what it's doing. In this case, the JS code includes comments that explain what each step does.

It's also important to note that these scripts aren't perfect. You may get some bad links or some links that didn't resolve properly. If this happens, try working through smaller portions of your library instead of all of it at once -- processing everything in one go could be the culprit (see the chunking sketch after Script 2 for one way to split it up).

Running the Script

  1. Scroll down to the Scripts section, choose a script, and copy its contents to your clipboard
  2. Navigate to your Gumroad library (should be https://app.gumroad.com/library)
  3. Load ALL CONTENT by scrolling all the way to the bottom of your library (see the auto-scroll helper after this list if you'd rather not do that by hand)
  4. Open your browser's Dev Tools -- in Chrome this is F12
  5. Paste the code into the Console, then hit ENTER
  6. You'll be prompted to save a text file
  7. You can now import this text file into a download manager like JDownloader or Internet Download Manager. I don't recommend plugging the text file from Script 1 straight into a downloader though, since it also contains the product page URLs that the downloads are grouped under.
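
If your library is long, manually scrolling to load everything can get tedious. Here's an optional helper you can paste into the Console first -- a rough sketch that assumes the library page lazy-loads more items as you scroll -- which keeps scrolling until the page stops growing:

(async () => {
    // Keep scrolling to the bottom until the page height stops growing,
    // i.e. no more library items are being lazy-loaded
    let previousHeight = 0;
    while (document.body.scrollHeight > previousHeight) {
        previousHeight = document.body.scrollHeight;
        window.scrollTo(0, document.body.scrollHeight);
        // Give the page a moment to load the next batch of items
        await new Promise(resolve => setTimeout(resolve, 1500));
    }
    console.log("Done scrolling -- the library should be fully loaded.");
})();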

Scripts

Script 1 (Dumps all download URLs to a text file, grouped by product page)

async function fetchUrls(link) {
    // Fetch and process URLs from the given link
    return fetch(link)
        .then(res => res.text())
        .then(text => {
            let parser = new DOMParser();
            let doc = parser.parseFromString(text, "text/html");
            var script = doc.querySelector("script[data-component-name]");
            // Build absolute download URLs for every file in this product
            var links = Array.from(JSON.parse(script.innerText).content.content_items)
                             .map(item => "https://app.gumroad.com" + item.download_url);
            // Return both the original link and the associated download URLs
            return {link, downloads: links};
        });
}

// Collect every product page link in the library and fetch its downloads
Promise.all(Array.from(document.querySelectorAll("article a"))
    .filter((link) => link.href.includes("/d/"))
    .map((a) => a.href)
    .map((link) => {
        // Fetch URLs and maintain their association with the original link
        return fetchUrls(link);
    }))
    .then(function(results) {
        // Process results to group downloads by their originating link
        let groupedDownloads = results.reduce((acc, {link, downloads}) => {
            acc[link] = downloads;
            return acc;
        }, {});

        // Prepare data for export
        let exportData = Object.entries(groupedDownloads).map(([page, downloads]) => {
            return `${page}\n${downloads.join("\n")}`;
        }).join("\n\n");

        // Create a blob and download it
        var blob = new Blob([exportData], {type: "text/plain;charset=utf-8"});
        var url = window.URL || window.webkitURL;
        var downloadLink = url.createObjectURL(blob);
        var a = document.createElement("a");
        a.download = "categorized_downloads_gumroad.txt";
        document.body.appendChild(a);
        a.href = downloadLink;
        a.click();
        a.remove();
    });
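
For reference, Script 1's output groups each product's download links under its product page URL, with a blank line between products. The file looks roughly like this (the paths below are placeholders, not real Gumroad URLs):

https://app.gumroad.com/d/<product-one-id>
https://app.gumroad.com/<file-download-path>
https://app.gumroad.com/<another-file-download-path>

https://app.gumroad.com/d/<product-two-id>
https://app.gumroad.com/<file-download-path>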

Script 2 (Dumps all download URLs to a text file)

async function fetchUrls(link) {
    // Fetch and process URLs from the given link
    return fetch(link)
        .then(res => res.text())
        .then(text => {
            let parser = new DOMParser();
            let doc = parser.parseFromString(text, "text/html");
            var script = doc.querySelector("script[data-component-name]");
            // Extract just the download URLs
            var links = Array.from(JSON.parse(script.innerText).content.content_items)
                             .map(item => "https://app.gumroad.com" + item.download_url);
            return links;
        });
}

// Collect every product page link in the library and fetch its downloads
Promise.all(Array.from(document.querySelectorAll("article a"))
    .filter(link => link.href.includes("/d/"))
    .map(a => a.href)
    .map(fetchUrls))
    .then(function(allUrls) {
        // Flatten the array of arrays and prepare data for export
        let flatUrls = allUrls.flat();
        let exportData = flatUrls.join("\n");

        // Create a blob and download it
        var blob = new Blob([exportData], {type: "text/plain;charset=utf-8"});
        var url = window.URL || window.webkitURL;
        var downloadLink = url.createObjectURL(blob);
        var a = document.createElement("a");
        a.download = "downloads_gumroad.txt";
        document.body.appendChild(a);
        a.href = downloadLink;
        a.click();
        a.remove();
    });
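
Finally, if you run into the bad-link issue mentioned in Before You Start, one way to work in smaller chunks is to only feed a slice of the product page links through fetchUrls per run. This is just a sketch of the idea -- pick your own slice bounds, re-run for each chunk, then continue with the rest of either script:

// Grab the product page links exactly as the scripts above do,
// but only keep the first 50 for this run
let pageLinks = Array.from(document.querySelectorAll("article a"))
    .filter(link => link.href.includes("/d/"))
    .map(a => a.href)
    .slice(0, 50); // use .slice(50, 100) on the next run, and so on

console.log("Processing " + pageLinks.length + " product pages in this chunk.");

// Then continue as in Script 2, for example:
// Promise.all(pageLinks.map(fetchUrls)).then(...)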