Compare commits

...

42 Commits

SHA1 Message Date
37660abe64 Update repository.json 2025-04-24 18:29:07 +00:00
c2093e19aa Update package 2025-04-24 20:25:57 +02:00
b9ee2819e3 adjust scxale added more tags 2025-04-24 20:25:49 +02:00
99a4f21be3 Update package 2025-04-24 20:20:12 +02:00
edb424de99 font scale fix 2025-04-24 20:20:08 +02:00
d89e34f3e2 Update package 2025-04-24 20:18:52 +02:00
ecfd19dd85 potential fix for font scaling 2025-04-24 20:18:48 +02:00
69c722a933 Update package 2025-04-24 20:14:13 +02:00
7c37a9e413 potential fix for fontscaling 2025-04-24 20:14:10 +02:00
89d712fcce Update package 2025-04-24 20:11:22 +02:00
9f243f09c9 fix 2025-04-24 20:11:19 +02:00
ce768dce10 Update package 2025-04-24 20:09:01 +02:00
731f2def96 loggin 2025-04-24 20:08:56 +02:00
b3ffe8bf55 Update package 2025-04-24 20:05:55 +02:00
8576d4a631 Update package 2025-04-24 20:02:45 +02:00
49329d1f64 potential fix for font_scale 2025-04-24 20:02:39 +02:00
21b67f5b86 Update package 2025-04-24 20:00:49 +02:00
837979232e fix because we are using python 3.3 sic 2025-04-24 20:00:38 +02:00
547225ac4d Update package 2025-04-24 19:59:00 +02:00
e05516ab22 log font_scale 2025-04-24 19:58:47 +02:00
bbb90a8a97 Update package 2025-04-24 19:55:01 +02:00
598e22002b potential fix for font scale not working 2025-04-24 19:54:50 +02:00
e81b359294 Update package 2025-04-24 19:48:09 +02:00
8709e88fbd Update package 2025-04-24 19:46:51 +02:00
b94ad5856d fix import 2025-04-24 19:46:39 +02:00
3004ab4b41 Update package 2025-04-24 19:43:26 +02:00
14f6474cd5 fixing import error 2025-04-24 19:43:14 +02:00
d0323405c0 Update package 2025-04-24 19:40:38 +02:00
661f5b8911 fix impoert for soupsieve 2025-04-24 19:40:29 +02:00
3947b4cc4d Update package 2025-04-24 19:36:33 +02:00
2b35cf5000 add test_import to be ignored on export 2025-04-24 19:36:25 +02:00
72c684e89c Update package 2025-04-24 19:32:11 +02:00
ae1ea101d2 update package 2025-04-24 19:32:01 +02:00
3dd7b5a18d made bs4 and soupsieve standalone in this project 2025-04-24 19:26:20 +02:00
aefb27614f fully make bs4 and soupsieve standalone in the project 2025-04-24 17:39:41 +02:00
ed336866ee new build 2025-04-24 17:07:15 +02:00
afa554b52b Merge branch 'master' of git.0x42.cloud:christian.morpurgo/MarkdownLivePreview 2025-04-24 17:06:49 +02:00
e0daa147f1 fix import 2025-04-24 17:06:45 +02:00
f3cd6d3fcf Update repository.json 2025-04-24 15:04:58 +00:00
0034602fff new build 2025-04-24 17:04:04 +02:00
b29d054de2 new build 2025-04-24 16:49:41 +02:00
a9f2f951c0 update repo 2025-04-24 16:42:21 +02:00
22 changed files with 219 additions and 98 deletions

.gitattributes

@ -1,3 +1,5 @@
docs/ export-ignore
resources/
!resources/*.base64
test_imports.py export-ignore
MarkdownLivePreview.sublime-package export-ignore


@ -1,8 +1,21 @@
import os, sys, sublime
pkg = os.path.basename(os.path.dirname(__file__))
lib = os.path.join(sublime.packages_path(), pkg, "lib")
if lib not in sys.path:
sys.path.insert(0, lib)
import os
import sys
import sublime
# Add the package archive path itself to sys.path
package_path = os.path.dirname(__file__)
if package_path not in sys.path:
sys.path.insert(0, package_path)
# --- Add lib to sys.path ---
# Get the directory containing this file (MarkdownLivePreview.py)
plugin_dir = os.path.dirname(__file__)
# Construct the absolute path to the 'lib' directory
lib_path = os.path.join(plugin_dir, 'lib')
# Add it to the beginning of sys.path if it's not already there
if lib_path not in sys.path:
sys.path.insert(0, lib_path)
# --- End sys.path modification ---
"""
Terminology
@ -38,7 +51,7 @@ def plugin_loaded():
resources["stylesheet"] = get_resource("stylesheet.css")
# FIXME: how could we make this setting update without restarting sublime text
# and not loading it every update as well
DELAY = get_settings().get(SETTING_DELAY_BETWEEN_UPDATES)
DELAY = get_settings().get(SETTING_DELAY_BETWEEN_UPDATES, 100) # Provide default
class MdlpInsertCommand(sublime_plugin.TextCommand):
@ -136,7 +149,6 @@ class MarkdownLivePreviewListener(sublime_plugin.EventListener):
if self.file_name is None:
total_region = sublime.Region(0, markdown_view.size())
self.content = markdown_view.substr(total_region)
markdown_view.erase(edit, total_region)
else:
self.content = None
@ -157,11 +169,8 @@ class MarkdownLivePreviewListener(sublime_plugin.EventListener):
if not infos:
return
assert (
markdown_view.id() == self.markdown_view.id()
), "pre_close view.id() != close view.id()"
del self.phantom_sets[markdown_view.id()]
if markdown_view.id() in self.phantom_sets:
del self.phantom_sets[markdown_view.id()]
self.preview_window.run_command("close_window")
@ -210,6 +219,7 @@ class MarkdownLivePreviewListener(sublime_plugin.EventListener):
settings = get_settings()
delay = settings.get(SETTING_DELAY_BETWEEN_UPDATES, 100) # Provide default
font_scale = settings.get(SETTING_FONT_SCALE, 1.0) # Provide default
print("--- MarkdownLivePreview: Using font_scale: {} ---".format(font_scale))
if time.time() - self.last_update < delay / 1000:
return
@ -217,6 +227,11 @@ class MarkdownLivePreviewListener(sublime_plugin.EventListener):
if markdown_view.buffer_id() == 0:
return
# Check if the phantom set still exists for this view ID
if markdown_view.id() not in self.phantom_sets:
# View might have been closed between modification and update
return
self.last_update = time.time()
total_region = sublime.Region(0, markdown_view.size())

Binary file not shown.

create_package.sh (new executable file)

@ -0,0 +1,6 @@
#!/bin/bash
git archive --format=zip --prefix="" --output=MarkdownLivePreview.sublime-package master
git add MarkdownLivePreview.sublime-package
git commit -m "Update package"
git push origin master


@ -4,7 +4,7 @@ __license__ = "MIT"
from collections import defaultdict
import itertools
import sys
from bs4.element import (
from ..element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
Stylesheet,


@ -7,13 +7,13 @@ __all__ = [
import warnings
import re
from bs4.builder import (
from . import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import (
from ..element import (
NamespacedAttribute,
nonwhitespace_re,
)
@ -22,7 +22,7 @@ from html5lib.constants import (
namespaces,
prefixes,
)
from bs4.element import (
from ..element import (
Comment,
Doctype,
NavigableString,
@ -120,7 +120,7 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
if soup:
self.soup = soup
else:
from bs4 import BeautifulSoup
from .. import BeautifulSoup
# TODO: Why is the parser 'html.parser' here? To avoid an
# infinite loop?
self.soup = BeautifulSoup(
@ -166,7 +166,7 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
return TextNode(Comment(data), self.soup)
def fragmentClass(self):
from bs4 import BeautifulSoup
from .. import BeautifulSoup
# TODO: Why is the parser 'html.parser' here? To avoid an
# infinite loop?
self.soup = BeautifulSoup("", "html.parser")
@ -184,7 +184,7 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
return treebuilder_base.TreeBuilder.getFragment(self).element
def testSerializer(self, element):
from bs4 import BeautifulSoup
from .. import BeautifulSoup
rv = []
doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$')


@ -34,16 +34,16 @@ CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
from bs4.element import (
from ..element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from ..dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
from . import (
HTML,
HTMLTreeBuilder,
STRICT,


@ -14,14 +14,14 @@ except ImportError as e:
from io import BytesIO
from io import StringIO
from lxml import etree
from bs4.element import (
from ..element import (
Comment,
Doctype,
NamespacedAttribute,
ProcessingInstruction,
XMLProcessingInstruction,
)
from bs4.builder import (
from . import (
FAST,
HTML,
HTMLTreeBuilder,
@ -29,7 +29,7 @@ from bs4.builder import (
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
from ..dammit import EncodingDetector
LXML = 'lxml'


@ -6,9 +6,9 @@ __license__ = "MIT"
import cProfile
from io import StringIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
from . import BeautifulSoup as bs4
from . import BeautifulSoup, __version__
from .builder import builder_registry
import os
import pstats


@ -9,14 +9,16 @@ import re
import sys
import warnings
try:
import soupsieve
# We are installed under the bs4 package
from soupsieve import *
except ImportError as e:
# We are installed standalone, or soupsieve is not installed.
soupsieve = None
warnings.warn(
'The soupsieve package is not installed. CSS selectors cannot be used.'
)
from bs4.formatter import (
from .formatter import (
Formatter,
HTMLFormatter,
XMLFormatter,
@ -380,7 +382,7 @@ class PageElement(object):
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
from bs4 import BeautifulSoup
from . import BeautifulSoup
if isinstance(new_child, BeautifulSoup):
# We don't want to end up with a situation where one BeautifulSoup
# object contains another. Insert the children one at a time.


@ -1,4 +1,4 @@
from bs4.dammit import EntitySubstitution
from .dammit import EntitySubstitution
class Formatter(EntitySubstitution):
"""Describes a strategy to use when outputting a parse tree to a string.


@ -9,8 +9,8 @@ import copy
import functools
import unittest
from unittest import TestCase
from bs4 import BeautifulSoup
from bs4.element import (
from . import BeautifulSoup
from .element import (
CharsetMetaAttributeValue,
Comment,
ContentMetaAttributeValue,
@ -22,7 +22,7 @@ from bs4.element import (
Tag
)
from bs4.builder import HTMLParserTreeBuilder
from .builder import HTMLParserTreeBuilder
default_builder = HTMLParserTreeBuilder
BAD_DOCUMENT = """A bare string


@ -3,21 +3,21 @@
import unittest
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
from .. import BeautifulSoup
from ..builder import (
builder_registry as registry,
HTMLParserTreeBuilder,
TreeBuilderRegistry,
)
try:
from bs4.builder import HTML5TreeBuilder
from ..builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError:
HTML5LIB_PRESENT = False
try:
from bs4.builder import (
from ..builder import (
LXMLTreeBuilderForXML,
LXMLTreeBuilder,
)


@ -3,12 +3,12 @@
import warnings
try:
from bs4.builder import HTML5TreeBuilder
from ..builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError as e:
HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from bs4.testing import (
from ..element import SoupStrainer
from ..testing import (
HTML5TreeBuilderSmokeTest,
SoupTest,
skipIf,


@ -3,9 +3,9 @@ trees."""
from pdb import set_trace
import pickle
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
from bs4.builder import HTMLParserTreeBuilder
from bs4.builder._htmlparser import BeautifulSoupHTMLParser
from ..testing import SoupTest, HTMLTreeBuilderSmokeTest
from ..builder import HTMLParserTreeBuilder
from ..builder._htmlparser import BeautifulSoupHTMLParser
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):


@ -12,16 +12,16 @@ except ImportError as e:
LXML_VERSION = (0,)
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from ..builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
from .. import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from bs4.testing import skipIf
from bs4.tests import test_htmlparser
from bs4.testing import (
from ..element import Comment, Doctype, SoupStrainer
from ..testing import skipIf
from . import test_htmlparser
from ..testing import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,


@ -7,17 +7,17 @@ import unittest
import sys
import tempfile
from bs4 import (
from .. import (
BeautifulSoup,
BeautifulStoneSoup,
GuessedAtParserWarning,
MarkupResemblesLocatorWarning,
)
from bs4.builder import (
from ..builder import (
TreeBuilder,
ParserRejectedMarkup,
)
from bs4.element import (
from ..element import (
CharsetMetaAttributeValue,
Comment,
ContentMetaAttributeValue,
@ -27,13 +27,13 @@ from bs4.element import (
NavigableString,
)
import bs4.dammit
from bs4.dammit import (
from ..dammit import *
from ..dammit import (
EntitySubstitution,
UnicodeDammit,
EncodingDetector,
)
from bs4.testing import (
from ..testing import (
default_builder,
SoupTest,
skipIf,
@ -41,7 +41,7 @@ from bs4.testing import (
import warnings
try:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from ..builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
LXML_PRESENT = True
except ImportError as e:
LXML_PRESENT = False
@ -120,11 +120,10 @@ class TestConstructor(SoupTest):
def feed(self, *args, **kwargs):
raise ParserRejectedMarkup("Nope.")
def prepare_markup(self, *args, **kwargs):
# We're going to try two different ways of preparing this markup,
# but feed() will reject both of them.
yield markup, None, None, False
yield markup, None, None, False
def prepare_markup(self, markup, *args, **kwargs):
# We're going to try two different ways of preparing this markup,
# but feed() will reject both of them.
yield markup, None, None, False
import re
self.assertRaisesRegex(
@ -418,13 +417,13 @@ class TestEncodingConversion(SoupTest):
def test_ascii_in_unicode_out(self):
# ASCII input is converted to Unicode. The original_encoding
# attribute is set to 'utf-8', a superset of ASCII.
chardet = bs4.dammit.chardet_dammit
chardet = chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
# Disable chardet, which will realize that the ASCII is ASCII.
bs4.dammit.chardet_dammit = noop
chardet_dammit = noop
ascii = b"<foo>a</foo>"
soup_from_ascii = self.soup(ascii)
unicode_output = soup_from_ascii.decode()
@ -433,7 +432,7 @@ class TestEncodingConversion(SoupTest):
self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8")
finally:
logging.disable(logging.NOTSET)
bs4.dammit.chardet_dammit = chardet
chardet_dammit = chardet
def test_unicode_in_unicode_out(self):
# Unicode input is left alone. The original_encoding attribute
@ -574,12 +573,12 @@ class TestUnicodeDammit(unittest.TestCase):
doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
chardet = bs4.dammit.chardet_dammit
chardet = chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
bs4.dammit.chardet_dammit = noop
chardet_dammit = noop
dammit = UnicodeDammit(doc)
self.assertEqual(True, dammit.contains_replacement_characters)
self.assertTrue("\ufffd" in dammit.unicode_markup)
@ -588,7 +587,7 @@ class TestUnicodeDammit(unittest.TestCase):
self.assertTrue(soup.contains_replacement_characters)
finally:
logging.disable(logging.NOTSET)
bs4.dammit.chardet_dammit = chardet
chardet_dammit = chardet
def test_byte_order_mark_removed(self):
# A document written in UTF-16LE will have its byte order marker stripped.
@ -613,7 +612,7 @@ class TestUnicodeDammit(unittest.TestCase):
self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")
# Unicode, Dammit thinks the whole document is Windows-1252,
# and decodes it into "☃☃☃Hi, I like Windows!☃☃☃"
# and decodes it into "☃☃☃"Hi, I like Windows!"☃☃☃"
# But if we run it through fix_embedded_windows_1252, it's fixed:


@ -14,12 +14,12 @@ import copy
import pickle
import re
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
from .. import BeautifulSoup
from ..builder import (
builder_registry,
HTMLParserTreeBuilder,
)
from bs4.element import (
from ..element import (
PY3K,
CData,
Comment,
@ -33,11 +33,11 @@ from bs4.element import (
Tag,
TemplateString,
)
from bs4.testing import (
from ..testing import (
SoupTest,
skipIf,
)
from soupsieve import SelectorSyntaxError
from ...soupsieve import SelectorSyntaxError
XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
LXML_PRESENT = (builder_registry.lookup("lxml") is not None)


@ -6,10 +6,13 @@ from .import css_types as ct
import unicodedata
from collections.abc import Sequence
import bs4
from ..bs4 import BeautifulSoup
from ..bs4.element import (
Tag, NavigableString, Comment, Declaration, CData, ProcessingInstruction, Doctype
)
# Empty tag pattern (whitespace okay)
RE_NOT_EMPTY = re.compile('[^ \t\r\n\f]')
RE_NOT_EMPTY = re.compile(r'[^ \t\r\n\f]')
RE_NOT_WS = re.compile('[^ \t\r\n\f]+')
@ -90,37 +93,37 @@ class _DocumentNav(object):
@staticmethod
def is_doc(obj):
"""Is `BeautifulSoup` object."""
return isinstance(obj, bs4.BeautifulSoup)
return isinstance(obj, BeautifulSoup)
@staticmethod
def is_tag(obj):
"""Is tag."""
return isinstance(obj, bs4.Tag)
return isinstance(obj, Tag)
@staticmethod
def is_declaration(obj): # pragma: no cover
"""Is declaration."""
return isinstance(obj, bs4.Declaration)
return isinstance(obj, Declaration)
@staticmethod
def is_cdata(obj):
"""Is CDATA."""
return isinstance(obj, bs4.CData)
return isinstance(obj, CData)
@staticmethod
def is_processing_instruction(obj): # pragma: no cover
"""Is processing instruction."""
return isinstance(obj, bs4.ProcessingInstruction)
return isinstance(obj, ProcessingInstruction)
@staticmethod
def is_navigable_string(obj):
"""Is navigable string."""
return isinstance(obj, bs4.NavigableString)
return isinstance(obj, NavigableString)
@staticmethod
def is_special_string(obj):
"""Is special string."""
return isinstance(obj, (bs4.Comment, bs4.Declaration, bs4.CData, bs4.ProcessingInstruction, bs4.Doctype))
return isinstance(obj, (Comment, Declaration, CData, ProcessingInstruction, Doctype))
@classmethod
def is_content_string(cls, obj):


@ -11,11 +11,12 @@ import os.path
import concurrent.futures
import urllib.request
import base64
import bs4
from .lib.bs4 import BeautifulSoup as bs4
from .lib.bs4.element import Comment
from functools import partial
from markdown2 import Markdown
from .lib.markdown2 import Markdown
__all__ = ("markdown2html",)
@ -33,7 +34,7 @@ def markdown2html(markdown, basepath, re_render, resources, viewport_width, font
"""
html = markdowner.convert(markdown)
soup = bs4.BeautifulSoup(html, "html.parser")
soup = bs4(html, "html.parser")
for img_element in soup.find_all("img"):
src = img_element["src"]
@ -61,7 +62,7 @@ def markdown2html(markdown, basepath, re_render, resources, viewport_width, font
# remove comments, because they pollute the console with error messages
for comment_element in soup.find_all(
text=lambda text: isinstance(text, bs4.Comment)
text=lambda text: isinstance(text, Comment)
):
comment_element.extract()
@ -79,14 +80,61 @@ def markdown2html(markdown, basepath, re_render, resources, viewport_width, font
.replace("\n", "<br />")
)
code_element.replace_with(bs4.BeautifulSoup(fixed_pre, "html.parser"))
code_element.replace_with(bs4(fixed_pre, "html.parser"))
# FIXME: highlight the code using Sublime's syntax
# Apply font scaling via inline styles
if font_scale != 1.0:
BASE_PX_SIZE = 15 # Base font size in pixels
TAG_MULTIPLIERS = {
'p': 1.0,
'li': 1.0,
'h1': 2.0,
'h2': 1.8,
'h3': 1.6,
'h4': 1.4,
'h5': 1.2,
'h6': 1.1,
'blockquote': 1.0,
'td': 1.0,
'th': 1.0,
'dt': 1.0,
'dd': 1.0,
'table': 1.0,
'tr': 1.0,
'ul': 1.0,
'ol': 1.0,
'code': 1.0,
'pre': 1.0,
'a': 1.0,
'strong': 1.0,
'em': 1.0,
's': 1.0,
'sup': 1.0,
'sub': 1.0,
'mark': 1.0,
'small': 1.0,
'big': 1.0,
'kbd': 1.0,
'samp': 1.0,
'var': 1.0,
'cite': 1.0,
'dfn': 1.0,
'abbr': 1.0,
'acronym': 1.0,
}
# Find all tags that we want to scale
for element in soup.find_all(list(TAG_MULTIPLIERS.keys())):
multiplier = TAG_MULTIPLIERS.get(element.name, 1.0)
target_size = round(BASE_PX_SIZE * multiplier * font_scale)
# Simple style setting (overwrites existing inline style if any)
# A more robust solution would parse and merge existing styles
element['style'] = "font-size: {}px;".format(target_size)
# FIXME: report that ST doesn't support <br/> but does work with <br />... WTF?
# Add font scaling CSS rule
font_scale_css = "body {{ font-size: {}em; }}\n".format(font_scale)
stylesheet = font_scale_css + resources["stylesheet"]
stylesheet = resources["stylesheet"] # Use only the base stylesheet
return "<style>\n{}\n</style>\n\n{}".format(stylesheet, soup).replace(
"<br/>", "<br />"
@ -194,13 +242,19 @@ def get_image_size(fhandle, pathlike):
fhandle.seek(size, 1)
byte = fhandle.read(1)
if byte == b"":
fhandle = end
byte = fhandle.read(1)
# Reached end of file unexpectedly, break the loop
break
while ord(byte) == 0xFF:
byte = fhandle.read(1)
if byte == b"": # Check EOF in inner loop too
break
if byte == b"": # Break outer loop if inner loop hit EOF
break
ftype = ord(byte)
size = struct.unpack(">H", fhandle.read(2))[0] - 2
# Check if the loop exited because of a break (EOF) before finding the marker
if not (0xC0 <= ftype <= 0xCF):
return "unknown format {!r}".format(format_)
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack(">HH", fhandle.read(4))
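
For reference (not part of the changeset above): a minimal standalone sketch of the per-tag inline scaling that the new font_scale handling performs, assuming a normally installed bs4 rather than the vendored copy, an abbreviated multiplier table, and an illustrative font_scale of 1.2.

from bs4 import BeautifulSoup  # assumption: plain installed bs4, not the vendored lib copy

BASE_PX_SIZE = 15                                   # same base size as in the diff
TAG_MULTIPLIERS = {"p": 1.0, "h1": 2.0, "h2": 1.8}  # abbreviated version of the table above
font_scale = 1.2                                    # illustrative; the plugin reads SETTING_FONT_SCALE

soup = BeautifulSoup("<h1>Title</h1><p>Body text</p>", "html.parser")
for element in soup.find_all(list(TAG_MULTIPLIERS.keys())):
    multiplier = TAG_MULTIPLIERS.get(element.name, 1.0)
    # e.g. h1: round(15 * 2.0 * 1.2) = 36; p: round(15 * 1.0 * 1.2) = 18
    element["style"] = "font-size: {}px;".format(round(BASE_PX_SIZE * multiplier * font_scale))
print(soup)
# -> <h1 style="font-size: 36px;">Title</h1><p style="font-size: 18px;">Body text</p>

Likewise, the EOF guards added to the JPEG branch of get_image_size can be pictured with a synthetic byte stream. The sketch below is an illustration rather than the plugin's function: it mirrors the SOFn marker scan but also excludes the non-SOF markers 0xC4, 0xC8 and 0xCC, which the file's simpler range check does not.

import io
import struct

def jpeg_dimensions(fhandle):
    """Scan JPEG marker segments until a SOFn marker, then return (width, height)."""
    fhandle.seek(2)                              # skip the SOI marker (FF D8)
    while True:
        byte = fhandle.read(1)
        while byte and byte != b"\xff":          # scan forward to the next marker
            byte = fhandle.read(1)
        while byte == b"\xff":                   # markers may be padded with extra FF bytes
            byte = fhandle.read(1)
        if not byte:
            return None                          # reached EOF before any SOFn marker
        ftype = ord(byte)
        size = struct.unpack(">H", fhandle.read(2))[0] - 2   # segment payload length
        if 0xC0 <= ftype <= 0xCF and ftype not in (0xC4, 0xC8, 0xCC):
            fhandle.seek(1, 1)                   # skip the precision byte
            height, width = struct.unpack(">HH", fhandle.read(4))
            return width, height
        fhandle.seek(size, 1)                    # not a SOFn block: skip its payload

# Synthetic stream: SOI, a short APP0-style segment, then a SOF0 header for a 4x3 image.
fake = (b"\xff\xd8"                                            # SOI
        b"\xff\xe0\x00\x04\x00\x00"                            # APP0, length 4 (2 payload bytes)
        b"\xff\xc0\x00\x0b\x08\x00\x03\x00\x04\x01\x01\x11\x00")  # SOF0: 8-bit, height 3, width 4
print(jpeg_dimensions(io.BytesIO(fake)))                       # -> (4, 3)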


@ -2,14 +2,14 @@
"schema_version": "3.0.0",
"packages": [
{
"name": "MarkdownLivePreview",
"name": "MarkdownLivePreview-FORK",
"description": "My enhanced live-preview fork of MarkdownLivePreview",
"author": "Christian Morpurgo",
"homepage": "https://git.0x42.cloud/christian.morpurgo/MarkdownLivePreview",
"releases": [
{
"version": "6.0.0",
"url": "https://git.0x42.cloud/christian.morpurgo/MarkdownLivePreview/releases/download/v6.0.0/MarkdownLivePreview.sublime-package",
"version": "6.0.2",
"url": "https://git.0x42.cloud/christian.morpurgo/MarkdownLivePreview/releases/download/v6.0.2/MarkdownLivePreview.sublime-package",
"date": "2025-04-24 00:00:00",
"sublime_text": "*"
}

test_imports.py (new file)

@ -0,0 +1,40 @@
# test_imports.py
import sys
import os
# Optional: Explicitly add project root to path if needed,
# although running from the root often suffices.
# project_root = os.path.dirname(__file__)
# if project_root not in sys.path:
# sys.path.insert(0, project_root)
print("Attempting imports...")
try:
# Try importing the main entry point for bs4 from the lib structure
from lib.bs4 import BeautifulSoup
print("- Successfully imported BeautifulSoup from lib.bs4")
# Try creating a simple soup object (tests basic bs4 internal imports)
soup = BeautifulSoup("<a></a>", "html.parser")
print(f"- Created soup object: {soup.a}")
# Try importing the main entry point for soupsieve
from lib.soupsieve import compile as soupsieve_compile
print("- Successfully imported compile from lib.soupsieve")
# Try compiling a simple selector (tests basic soupsieve internal imports)
compiled = soupsieve_compile("a")
print(f"- Compiled selector: {compiled.pattern}")
# Try using the selector (tests soupsieve -> bs4 interaction)
match = compiled.select_one(soup)
print(f"- Selector match: {match}")
print("\nBasic import and usage tests passed!")
except ImportError as e:
print(f"\nImport Error: {e}")
print("Failed to import. Check paths and internal library structure.")
except Exception as e:
print(f"\nRuntime Error: {e}")
print("Imports might have worked, but usage failed.")
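
For reference, the output this script is expected to produce when both vendored imports resolve, derived from the print calls above rather than captured from an actual run (treat it as an expectation, not a log):

Attempting imports...
- Successfully imported BeautifulSoup from lib.bs4
- Created soup object: <a></a>
- Successfully imported compile from lib.soupsieve
- Compiled selector: a
- Selector match: <a></a>

Basic import and usage tests passed!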