Compare commits


66 Commits

SHA1 Message Date
37660abe64 Update repository.json 2025-04-24 18:29:07 +00:00
c2093e19aa Update package 2025-04-24 20:25:57 +02:00
b9ee2819e3 adjust scxale added more tags 2025-04-24 20:25:49 +02:00
99a4f21be3 Update package 2025-04-24 20:20:12 +02:00
edb424de99 font scale fix 2025-04-24 20:20:08 +02:00
d89e34f3e2 Update package 2025-04-24 20:18:52 +02:00
ecfd19dd85 potential fix for font scaling 2025-04-24 20:18:48 +02:00
69c722a933 Update package 2025-04-24 20:14:13 +02:00
7c37a9e413 potential fix for fontscaling 2025-04-24 20:14:10 +02:00
89d712fcce Update package 2025-04-24 20:11:22 +02:00
9f243f09c9 fix 2025-04-24 20:11:19 +02:00
ce768dce10 Update package 2025-04-24 20:09:01 +02:00
731f2def96 loggin 2025-04-24 20:08:56 +02:00
b3ffe8bf55 Update package 2025-04-24 20:05:55 +02:00
8576d4a631 Update package 2025-04-24 20:02:45 +02:00
49329d1f64 potential fix for font_scale 2025-04-24 20:02:39 +02:00
21b67f5b86 Update package 2025-04-24 20:00:49 +02:00
837979232e fix because we are using python 3.3 sic 2025-04-24 20:00:38 +02:00
547225ac4d Update package 2025-04-24 19:59:00 +02:00
e05516ab22 log font_scale 2025-04-24 19:58:47 +02:00
bbb90a8a97 Update package 2025-04-24 19:55:01 +02:00
598e22002b potential fix for font scale not working 2025-04-24 19:54:50 +02:00
e81b359294 Update package 2025-04-24 19:48:09 +02:00
8709e88fbd Update package 2025-04-24 19:46:51 +02:00
b94ad5856d fix import 2025-04-24 19:46:39 +02:00
3004ab4b41 Update package 2025-04-24 19:43:26 +02:00
14f6474cd5 fixing import error 2025-04-24 19:43:14 +02:00
d0323405c0 Update package 2025-04-24 19:40:38 +02:00
661f5b8911 fix impoert for soupsieve 2025-04-24 19:40:29 +02:00
3947b4cc4d Update package 2025-04-24 19:36:33 +02:00
2b35cf5000 add test_import to be ignored on export 2025-04-24 19:36:25 +02:00
72c684e89c Update package 2025-04-24 19:32:11 +02:00
ae1ea101d2 update package 2025-04-24 19:32:01 +02:00
3dd7b5a18d made bs4 and soupsieve standalone in this project 2025-04-24 19:26:20 +02:00
aefb27614f fully make bs4 and soupsieve standalone in the project 2025-04-24 17:39:41 +02:00
ed336866ee new build 2025-04-24 17:07:15 +02:00
afa554b52b Merge branch 'master' of git.0x42.cloud:christian.morpurgo/MarkdownLivePreview 2025-04-24 17:06:49 +02:00
e0daa147f1 fix import 2025-04-24 17:06:45 +02:00
f3cd6d3fcf Update repository.json 2025-04-24 15:04:58 +00:00
0034602fff new build 2025-04-24 17:04:04 +02:00
b29d054de2 new build 2025-04-24 16:49:41 +02:00
a9f2f951c0 update repo 2025-04-24 16:42:21 +02:00
696eb1e1dd new build 2025-04-24 16:41:00 +02:00
6379b673fc fix imports 2025-04-24 16:40:24 +02:00
b521caed0f new build 2025-04-24 16:38:39 +02:00
9bcd725a41 new build added 2025-04-24 16:37:26 +02:00
aec40dcf2f remove old package 2025-04-24 16:33:56 +02:00
b25ab1a4c6 Merge branch 'master' of git.0x42.cloud:christian.morpurgo/MarkdownLivePreview 2025-04-24 16:32:31 +02:00
243555c0c9 change imports move libraries into lib 2025-04-24 16:32:24 +02:00
40da1972af Delete dependencies.json 2025-04-24 14:18:49 +00:00
af7b054d5d Update repository.json 2025-04-24 14:16:27 +00:00
4d3c2a7139 Update repository.json 2025-04-24 14:05:29 +00:00
06d45e51b9 archive 2025-04-24 16:03:43 +02:00
0ddc896fc3 Merge branch 'master' of git.0x42.cloud:christian.morpurgo/MarkdownLivePreview 2025-04-24 16:03:09 +02:00
6f488e0d64 Update repository.json 2025-04-24 13:35:51 +00:00
4eed4934ff Update repository.json 2025-04-24 13:30:58 +00:00
626793e31d Update repository.json 2025-04-24 13:22:41 +00:00
c5493db292 Update repository.json 2025-04-24 13:18:10 +00:00
e6b028506b Update repository.json 2025-04-24 13:11:23 +00:00
2973f7f138 Update repository.json 2025-04-24 12:41:55 +00:00
2a58c22160 Update repository.json 2025-04-24 12:41:44 +00:00
beb6cfe709 Update repository.json 2025-04-24 12:33:51 +00:00
44eb19d923 Update repository.json 2025-04-24 12:32:30 +00:00
0354ddf41d Add channel.json 2025-04-24 12:30:54 +00:00
fede6c2873 Update repository.json 2025-04-24 12:29:18 +00:00
141a7d062c Update repository.json 2025-04-24 12:25:46 +00:00
33 changed files with 232 additions and 103 deletions

2
.gitattributes vendored
View File

@@ -1,3 +1,5 @@
 docs/ export-ignore
 resources/
 !resources/*.base64
+test_imports.py export-ignore
+MarkdownLivePreview.sublime-package export-ignore

View File

@@ -1,3 +1,22 @@
+import os
+import sys
+import sublime
+# Add the package archive path itself to sys.path
+package_path = os.path.dirname(__file__)
+if package_path not in sys.path:
+sys.path.insert(0, package_path)
+# --- Add lib to sys.path ---
+# Get the directory containing this file (MarkdownLivePreview.py)
+plugin_dir = os.path.dirname(__file__)
+# Construct the absolute path to the 'lib' directory
+lib_path = os.path.join(plugin_dir, 'lib')
+# Add it to the beginning of sys.path if it's not already there
+if lib_path not in sys.path:
+sys.path.insert(0, lib_path)
+# --- End sys.path modification ---
 """
 Terminology
 original_view: the view in the regular editor, without it's own window
@@ -8,9 +27,7 @@ preview_window: the window with the markdown file and the preview
 """
 import time
-import os.path
 import struct
-import sublime
 import sublime_plugin
 from functools import partial
@@ -34,7 +51,7 @@ def plugin_loaded():
 resources["stylesheet"] = get_resource("stylesheet.css")
 # FIXME: how could we make this setting update without restarting sublime text
 # and not loading it every update as well
-DELAY = get_settings().get(SETTING_DELAY_BETWEEN_UPDATES)
+DELAY = get_settings().get(SETTING_DELAY_BETWEEN_UPDATES, 100) # Provide default
 class MdlpInsertCommand(sublime_plugin.TextCommand):
@@ -132,7 +149,6 @@ class MarkdownLivePreviewListener(sublime_plugin.EventListener):
 if self.file_name is None:
 total_region = sublime.Region(0, markdown_view.size())
 self.content = markdown_view.substr(total_region)
-markdown_view.erase(edit, total_region)
 else:
 self.content = None
@@ -153,10 +169,7 @@ class MarkdownLivePreviewListener(sublime_plugin.EventListener):
 if not infos:
 return
-assert (
-markdown_view.id() == self.markdown_view.id()
-), "pre_close view.id() != close view.id()"
+if markdown_view.id() in self.phantom_sets:
 del self.phantom_sets[markdown_view.id()]
 self.preview_window.run_command("close_window")
@@ -206,6 +219,7 @@ class MarkdownLivePreviewListener(sublime_plugin.EventListener):
 settings = get_settings()
 delay = settings.get(SETTING_DELAY_BETWEEN_UPDATES, 100) # Provide default
 font_scale = settings.get(SETTING_FONT_SCALE, 1.0) # Provide default
+print("--- MarkdownLivePreview: Using font_scale: {} ---".format(font_scale))
 if time.time() - self.last_update < delay / 1000:
 return
@@ -213,6 +227,11 @@ class MarkdownLivePreviewListener(sublime_plugin.EventListener):
 if markdown_view.buffer_id() == 0:
 return
+# Check if the phantom set still exists for this view ID
+if markdown_view.id() not in self.phantom_sets:
+# View might have been closed between modification and update
+return
 self.last_update = time.time()
 total_region = sublime.Region(0, markdown_view.size())

Binary file not shown.

7
channel.json Normal file
View File

@@ -0,0 +1,7 @@
+{
+"$schema": "sublime://packagecontrol.io/schemas/channel",
+"schema_version": "4.0.0",
+"repositories": [
+"https://git.0x42.cloud/christian.morpurgo/MarkdownLivePreview/raw/branch/master/repository.json"
+]
+}
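
For reference, a user could subscribe to this channel by adding its raw URL to the "channels" list in the Package Control user settings. This is a minimal sketch, assuming channel.json is served from the same raw/branch/master path as the repository.json referenced above, and that the default packagecontrol.io channel is kept alongside it:

{
    "channels": [
        "https://packagecontrol.io/channel_v3.json",
        "https://git.0x42.cloud/christian.morpurgo/MarkdownLivePreview/raw/branch/master/channel.json"
    ]
}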

6
create_package.sh Executable file
View File

@@ -0,0 +1,6 @@
+#!/bin/bash
+git archive --format=zip --prefix="" --output=MarkdownLivePreview.sublime-package master
+git add MarkdownLivePreview.sublime-package
+git commit -m "Update package"
+git push origin master

View File

@@ -1,7 +0,0 @@
-{
-"*": {
-"*": [
-"bs4"
-]
-}
-}

View File

@@ -4,7 +4,7 @@ __license__ = "MIT"
 from collections import defaultdict
 import itertools
 import sys
-from bs4.element import (
+from ..element import (
 CharsetMetaAttributeValue,
 ContentMetaAttributeValue,
 Stylesheet,

View File

@@ -7,13 +7,13 @@ __all__ = [
 import warnings
 import re
-from bs4.builder import (
+from . import (
 PERMISSIVE,
 HTML,
 HTML_5,
 HTMLTreeBuilder,
 )
-from bs4.element import (
+from ..element import (
 NamespacedAttribute,
 nonwhitespace_re,
 )
@@ -22,7 +22,7 @@ from html5lib.constants import (
 namespaces,
 prefixes,
 )
-from bs4.element import (
+from ..element import (
 Comment,
 Doctype,
 NavigableString,
@@ -120,7 +120,7 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
 if soup:
 self.soup = soup
 else:
-from bs4 import BeautifulSoup
+from .. import BeautifulSoup
 # TODO: Why is the parser 'html.parser' here? To avoid an
 # infinite loop?
 self.soup = BeautifulSoup(
@@ -166,7 +166,7 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
 return TextNode(Comment(data), self.soup)
 def fragmentClass(self):
-from bs4 import BeautifulSoup
+from .. import BeautifulSoup
 # TODO: Why is the parser 'html.parser' here? To avoid an
 # infinite loop?
 self.soup = BeautifulSoup("", "html.parser")
@@ -184,7 +184,7 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
 return treebuilder_base.TreeBuilder.getFragment(self).element
 def testSerializer(self, element):
-from bs4 import BeautifulSoup
+from .. import BeautifulSoup
 rv = []
 doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$')

View File

@@ -34,16 +34,16 @@ CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
 CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
-from bs4.element import (
+from ..element import (
 CData,
 Comment,
 Declaration,
 Doctype,
 ProcessingInstruction,
 )
-from bs4.dammit import EntitySubstitution, UnicodeDammit
-from bs4.builder import (
+from ..dammit import EntitySubstitution, UnicodeDammit
+from . import (
 HTML,
 HTMLTreeBuilder,
 STRICT,

View File

@@ -14,14 +14,14 @@ except ImportError as e:
 from io import BytesIO
 from io import StringIO
 from lxml import etree
-from bs4.element import (
+from ..element import (
 Comment,
 Doctype,
 NamespacedAttribute,
 ProcessingInstruction,
 XMLProcessingInstruction,
 )
-from bs4.builder import (
+from . import (
 FAST,
 HTML,
 HTMLTreeBuilder,
@@ -29,7 +29,7 @@ from bs4.builder import (
 ParserRejectedMarkup,
 TreeBuilder,
 XML)
-from bs4.dammit import EncodingDetector
+from ..dammit import EncodingDetector
 LXML = 'lxml'

View File

@@ -6,9 +6,9 @@ __license__ = "MIT"
 import cProfile
 from io import StringIO
 from html.parser import HTMLParser
-import bs4
-from bs4 import BeautifulSoup, __version__
-from bs4.builder import builder_registry
+from . import BeautifulSoup as bs4
+from . import BeautifulSoup, __version__
+from .builder import builder_registry
 import os
 import pstats

View File

@@ -9,14 +9,16 @@ import re
 import sys
 import warnings
 try:
-import soupsieve
+# We are installed under the bs4 package
+from soupsieve import *
 except ImportError as e:
+# We are installed standalone, or soupsieve is not installed.
 soupsieve = None
 warnings.warn(
 'The soupsieve package is not installed. CSS selectors cannot be used.'
 )
-from bs4.formatter import (
+from .formatter import (
 Formatter,
 HTMLFormatter,
 XMLFormatter,
@@ -380,7 +382,7 @@ class PageElement(object):
 and not isinstance(new_child, NavigableString)):
 new_child = NavigableString(new_child)
-from bs4 import BeautifulSoup
+from . import BeautifulSoup
 if isinstance(new_child, BeautifulSoup):
 # We don't want to end up with a situation where one BeautifulSoup
 # object contains another. Insert the children one at a time.

View File

@@ -1,4 +1,4 @@
-from bs4.dammit import EntitySubstitution
+from .dammit import EntitySubstitution
 class Formatter(EntitySubstitution):
 """Describes a strategy to use when outputting a parse tree to a string.

View File

@@ -9,8 +9,8 @@ import copy
 import functools
 import unittest
 from unittest import TestCase
-from bs4 import BeautifulSoup
-from bs4.element import (
+from . import BeautifulSoup
+from .element import (
 CharsetMetaAttributeValue,
 Comment,
 ContentMetaAttributeValue,
@@ -22,7 +22,7 @@ from bs4.element import (
 Tag
 )
-from bs4.builder import HTMLParserTreeBuilder
+from .builder import HTMLParserTreeBuilder
 default_builder = HTMLParserTreeBuilder
 BAD_DOCUMENT = """A bare string

View File

@@ -3,21 +3,21 @@
 import unittest
 import warnings
-from bs4 import BeautifulSoup
-from bs4.builder import (
+from .. import BeautifulSoup
+from ..builder import (
 builder_registry as registry,
 HTMLParserTreeBuilder,
 TreeBuilderRegistry,
 )
 try:
-from bs4.builder import HTML5TreeBuilder
+from ..builder import HTML5TreeBuilder
 HTML5LIB_PRESENT = True
 except ImportError:
 HTML5LIB_PRESENT = False
 try:
-from bs4.builder import (
+from ..builder import (
 LXMLTreeBuilderForXML,
 LXMLTreeBuilder,
 )

View File

@@ -3,12 +3,12 @@
 import warnings
 try:
-from bs4.builder import HTML5TreeBuilder
+from ..builder import HTML5TreeBuilder
 HTML5LIB_PRESENT = True
 except ImportError as e:
 HTML5LIB_PRESENT = False
-from bs4.element import SoupStrainer
-from bs4.testing import (
+from ..element import SoupStrainer
+from ..testing import (
 HTML5TreeBuilderSmokeTest,
 SoupTest,
 skipIf,

View File

@@ -3,9 +3,9 @@ trees."""
 from pdb import set_trace
 import pickle
-from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
-from bs4.builder import HTMLParserTreeBuilder
-from bs4.builder._htmlparser import BeautifulSoupHTMLParser
+from ..testing import SoupTest, HTMLTreeBuilderSmokeTest
+from ..builder import HTMLParserTreeBuilder
+from ..builder._htmlparser import BeautifulSoupHTMLParser
 class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):

View File

@@ -12,16 +12,16 @@ except ImportError as e:
 LXML_VERSION = (0,)
 if LXML_PRESENT:
-from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
-from bs4 import (
+from ..builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
+from .. import (
 BeautifulSoup,
 BeautifulStoneSoup,
 )
-from bs4.element import Comment, Doctype, SoupStrainer
-from bs4.testing import skipIf
-from bs4.tests import test_htmlparser
-from bs4.testing import (
+from ..element import Comment, Doctype, SoupStrainer
+from ..testing import skipIf
+from . import test_htmlparser
+from ..testing import (
 HTMLTreeBuilderSmokeTest,
 XMLTreeBuilderSmokeTest,
 SoupTest,

View File

@@ -7,17 +7,17 @@ import unittest
 import sys
 import tempfile
-from bs4 import (
+from .. import (
 BeautifulSoup,
 BeautifulStoneSoup,
 GuessedAtParserWarning,
 MarkupResemblesLocatorWarning,
 )
-from bs4.builder import (
+from ..builder import (
 TreeBuilder,
 ParserRejectedMarkup,
 )
-from bs4.element import (
+from ..element import (
 CharsetMetaAttributeValue,
 Comment,
 ContentMetaAttributeValue,
@@ -27,13 +27,13 @@ from bs4.element import (
 NavigableString,
 )
-import bs4.dammit
-from bs4.dammit import (
+from ..dammit import *
+from ..dammit import (
 EntitySubstitution,
 UnicodeDammit,
 EncodingDetector,
 )
-from bs4.testing import (
+from ..testing import (
 default_builder,
 SoupTest,
 skipIf,
@@ -41,7 +41,7 @@ from bs4.testing import (
 import warnings
 try:
-from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
+from ..builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
 LXML_PRESENT = True
 except ImportError as e:
 LXML_PRESENT = False
@@ -120,11 +120,10 @@ class TestConstructor(SoupTest):
 def feed(self, *args, **kwargs):
 raise ParserRejectedMarkup("Nope.")
-def prepare_markup(self, *args, **kwargs):
+def prepare_markup(self, markup, *args, **kwargs):
 # We're going to try two different ways of preparing this markup,
 # but feed() will reject both of them.
 yield markup, None, None, False
-yield markup, None, None, False
 import re
 self.assertRaisesRegex(
@@ -418,13 +417,13 @@ class TestEncodingConversion(SoupTest):
 def test_ascii_in_unicode_out(self):
 # ASCII input is converted to Unicode. The original_encoding
 # attribute is set to 'utf-8', a superset of ASCII.
-chardet = bs4.dammit.chardet_dammit
+chardet = chardet_dammit
 logging.disable(logging.WARNING)
 try:
 def noop(str):
 return None
 # Disable chardet, which will realize that the ASCII is ASCII.
-bs4.dammit.chardet_dammit = noop
+chardet_dammit = noop
 ascii = b"<foo>a</foo>"
 soup_from_ascii = self.soup(ascii)
 unicode_output = soup_from_ascii.decode()
@@ -433,7 +432,7 @@ class TestEncodingConversion(SoupTest):
 self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8")
 finally:
 logging.disable(logging.NOTSET)
-bs4.dammit.chardet_dammit = chardet
+chardet_dammit = chardet
 def test_unicode_in_unicode_out(self):
 # Unicode input is left alone. The original_encoding attribute
@@ -574,12 +573,12 @@ class TestUnicodeDammit(unittest.TestCase):
 doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
 <html><b>\330\250\330\252\330\261</b>
 <i>\310\322\321\220\312\321\355\344</i></html>"""
-chardet = bs4.dammit.chardet_dammit
+chardet = chardet_dammit
 logging.disable(logging.WARNING)
 try:
 def noop(str):
 return None
-bs4.dammit.chardet_dammit = noop
+chardet_dammit = noop
 dammit = UnicodeDammit(doc)
 self.assertEqual(True, dammit.contains_replacement_characters)
 self.assertTrue("\ufffd" in dammit.unicode_markup)
@@ -588,7 +587,7 @@ class TestUnicodeDammit(unittest.TestCase):
 self.assertTrue(soup.contains_replacement_characters)
 finally:
 logging.disable(logging.NOTSET)
-bs4.dammit.chardet_dammit = chardet
+chardet_dammit = chardet
 def test_byte_order_mark_removed(self):
 # A document written in UTF-16LE will have its byte order marker stripped.
@@ -613,7 +612,7 @@ class TestUnicodeDammit(unittest.TestCase):
 self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")
 # Unicode, Dammit thinks the whole document is Windows-1252,
-# and decodes it into "☃☃☃Hi, I like Windows!☃☃☃"
+# and decodes it into "☃☃☃"Hi, I like Windows!"☃☃☃"
 # But if we run it through fix_embedded_windows_1252, it's fixed:

View File

@@ -14,12 +14,12 @@ import copy
 import pickle
 import re
 import warnings
-from bs4 import BeautifulSoup
-from bs4.builder import (
+from .. import BeautifulSoup
+from ..builder import (
 builder_registry,
 HTMLParserTreeBuilder,
 )
-from bs4.element import (
+from ..element import (
 PY3K,
 CData,
 Comment,
@@ -33,11 +33,11 @@ from bs4.element import (
 Tag,
 TemplateString,
 )
-from bs4.testing import (
+from ..testing import (
 SoupTest,
 skipIf,
 )
-from soupsieve import SelectorSyntaxError
+from ...soupsieve import SelectorSyntaxError
 XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
 LXML_PRESENT = (builder_registry.lookup("lxml") is not None)

View File

@@ -6,10 +6,13 @@ from .import css_types as ct
 import unicodedata
 from collections.abc import Sequence
-import bs4
+from ..bs4 import BeautifulSoup
+from ..bs4.element import (
+Tag, NavigableString, Comment, Declaration, CData, ProcessingInstruction, Doctype
+)
 # Empty tag pattern (whitespace okay)
-RE_NOT_EMPTY = re.compile('[^ \t\r\n\f]')
+RE_NOT_EMPTY = re.compile(r'[^ \t\r\n\f]')
 RE_NOT_WS = re.compile('[^ \t\r\n\f]+')
@@ -90,37 +93,37 @@ class _DocumentNav(object):
 @staticmethod
 def is_doc(obj):
 """Is `BeautifulSoup` object."""
-return isinstance(obj, bs4.BeautifulSoup)
+return isinstance(obj, BeautifulSoup)
 @staticmethod
 def is_tag(obj):
 """Is tag."""
-return isinstance(obj, bs4.Tag)
+return isinstance(obj, Tag)
 @staticmethod
 def is_declaration(obj): # pragma: no cover
 """Is declaration."""
-return isinstance(obj, bs4.Declaration)
+return isinstance(obj, Declaration)
 @staticmethod
 def is_cdata(obj):
 """Is CDATA."""
-return isinstance(obj, bs4.CData)
+return isinstance(obj, CData)
 @staticmethod
 def is_processing_instruction(obj): # pragma: no cover
 """Is processing instruction."""
-return isinstance(obj, bs4.ProcessingInstruction)
+return isinstance(obj, ProcessingInstruction)
 @staticmethod
 def is_navigable_string(obj):
 """Is navigable string."""
-return isinstance(obj, bs4.NavigableString)
+return isinstance(obj, NavigableString)
 @staticmethod
 def is_special_string(obj):
 """Is special string."""
-return isinstance(obj, (bs4.Comment, bs4.Declaration, bs4.CData, bs4.ProcessingInstruction, bs4.Doctype))
+return isinstance(obj, (Comment, Declaration, CData, ProcessingInstruction, Doctype))
 @classmethod
 def is_content_string(cls, obj):

View File

@@ -11,7 +11,8 @@ import os.path
 import concurrent.futures
 import urllib.request
 import base64
-import bs4
+from .lib.bs4 import BeautifulSoup as bs4
+from .lib.bs4.element import Comment
 from functools import partial
@@ -33,7 +34,7 @@ def markdown2html(markdown, basepath, re_render, resources, viewport_width, font
 """
 html = markdowner.convert(markdown)
-soup = bs4.BeautifulSoup(html, "html.parser")
+soup = bs4(html, "html.parser")
 for img_element in soup.find_all("img"):
 src = img_element["src"]
@@ -61,7 +62,7 @@ def markdown2html(markdown, basepath, re_render, resources, viewport_width, font
 # remove comments, because they pollute the console with error messages
 for comment_element in soup.find_all(
-text=lambda text: isinstance(text, bs4.Comment)
+text=lambda text: isinstance(text, Comment)
 ):
 comment_element.extract()
@@ -79,14 +80,61 @@ def markdown2html(markdown, basepath, re_render, resources, viewport_width, font
 .replace("\n", "<br />")
 )
-code_element.replace_with(bs4.BeautifulSoup(fixed_pre, "html.parser"))
+code_element.replace_with(bs4(fixed_pre, "html.parser"))
 # FIXME: highlight the code using Sublime's syntax
+# Apply font scaling via inline styles
+if font_scale != 1.0:
+BASE_PX_SIZE = 15 # Base font size in pixels
+TAG_MULTIPLIERS = {
+'p': 1.0,
+'li': 1.0,
+'h1': 2.0,
+'h2': 1.8,
+'h3': 1.6,
+'h4': 1.4,
+'h5': 1.2,
+'h6': 1.1,
+'blockquote': 1.0,
+'td': 1.0,
+'th': 1.0,
+'dt': 1.0,
+'dd': 1.0,
+'table': 1.0,
+'tr': 1.0,
+'ul': 1.0,
+'ol': 1.0,
+'code': 1.0,
+'pre': 1.0,
+'a': 1.0,
+'strong': 1.0,
+'em': 1.0,
+'s': 1.0,
+'sup': 1.0,
+'sub': 1.0,
+'mark': 1.0,
+'small': 1.0,
+'big': 1.0,
+'kbd': 1.0,
+'samp': 1.0,
+'var': 1.0,
+'cite': 1.0,
+'dfn': 1.0,
+'abbr': 1.0,
+'acronym': 1.0,
+}
+# Find all tags that we want to scale
+for element in soup.find_all(list(TAG_MULTIPLIERS.keys())):
+multiplier = TAG_MULTIPLIERS.get(element.name, 1.0)
+target_size = round(BASE_PX_SIZE * multiplier * font_scale)
+# Simple style setting (overwrites existing inline style if any)
+# A more robust solution would parse and merge existing styles
+element['style'] = "font-size: {}px;".format(target_size)
 # FIXME: report that ST doesn't support <br/> but does work with <br />... WTF?
-# Add font scaling CSS rule
-font_scale_css = "body {{ font-size: {}em; }}\n".format(font_scale)
-stylesheet = font_scale_css + resources["stylesheet"]
+stylesheet = resources["stylesheet"] # Use only the base stylesheet
 return "<style>\n{}\n</style>\n\n{}".format(stylesheet, soup).replace(
 "<br/>", "<br />"
@@ -194,13 +242,19 @@ def get_image_size(fhandle, pathlike):
 fhandle.seek(size, 1)
 byte = fhandle.read(1)
 if byte == b"":
-fhandle = end
-byte = fhandle.read(1)
+# Reached end of file unexpectedly, break the loop
+break
 while ord(byte) == 0xFF:
 byte = fhandle.read(1)
+if byte == b"": # Check EOF in inner loop too
+break
+if byte == b"": # Break outer loop if inner loop hit EOF
+break
 ftype = ord(byte)
 size = struct.unpack(">H", fhandle.read(2))[0] - 2
+# Check if the loop exited because of a break (EOF) before finding the marker
+if not (0xC0 <= ftype <= 0xCF):
+return "unknown format {!r}".format(format_)
 # We are at a SOFn block
 fhandle.seek(1, 1) # Skip `precision' byte.
 height, width = struct.unpack(">HH", fhandle.read(4))

View File

@@ -1,13 +1,17 @@
 {
-"schema_version": "3",
+"schema_version": "3.0.0",
 "packages": [
 {
-"name": "MarkdownLivePreview-Fork",
+"name": "MarkdownLivePreview-FORK",
 "description": "My enhanced live-preview fork of MarkdownLivePreview",
+"author": "Christian Morpurgo",
+"homepage": "https://git.0x42.cloud/christian.morpurgo/MarkdownLivePreview",
 "releases": [
 {
-"version": "1.0.0",
-"url": "https://git.0x42.cloud/christian.morpurgo/MarkdownLivePreview/archive/v0.0.0.zip"
+"version": "6.0.2",
+"url": "https://git.0x42.cloud/christian.morpurgo/MarkdownLivePreview/releases/download/v6.0.2/MarkdownLivePreview.sublime-package",
+"date": "2025-04-24 00:00:00",
+"sublime_text": "*"
 }
 ]
 }

40
test_imports.py Normal file
View File

@@ -0,0 +1,40 @@
+# test_imports.py
+import sys
+import os
+# Optional: Explicitly add project root to path if needed,
+# although running from the root often suffices.
+# project_root = os.path.dirname(__file__)
+# if project_root not in sys.path:
+# sys.path.insert(0, project_root)
+print("Attempting imports...")
+try:
+# Try importing the main entry point for bs4 from the lib structure
+from lib.bs4 import BeautifulSoup
+print("- Successfully imported BeautifulSoup from lib.bs4")
+# Try creating a simple soup object (tests basic bs4 internal imports)
+soup = BeautifulSoup("<a></a>", "html.parser")
+print(f"- Created soup object: {soup.a}")
+# Try importing the main entry point for soupsieve
+from lib.soupsieve import compile as soupsieve_compile
+print("- Successfully imported compile from lib.soupsieve")
+# Try compiling a simple selector (tests basic soupsieve internal imports)
+compiled = soupsieve_compile("a")
+print(f"- Compiled selector: {compiled.pattern}")
+# Try using the selector (tests soupsieve -> bs4 interaction)
+match = compiled.select_one(soup)
+print(f"- Selector match: {match}")
+print("\nBasic import and usage tests passed!")
+except ImportError as e:
+print(f"\nImport Error: {e}")
+print("Failed to import. Check paths and internal library structure.")
+except Exception as e:
+print(f"\nRuntime Error: {e}")
+print("Imports might have worked, but usage failed.")