/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 100 -*- */
/*
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

#pragma once

#include <cassert>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

#include "Png.hpp"
#include "Rectangle.hpp"
#include "TileDesc.hpp"

#if ENABLE_DEBUG
# define ADD_DEBUG_RENDERID (" renderid=" + Util::UniqueId() + '\n')
#else
# define ADD_DEBUG_RENDERID ("\n")
#endif

/// A quick & dirty cache of the last few PNGs
/// and their hashes to avoid re-compression
/// wherever possible.
class PngCache
{
public:
    typedef std::shared_ptr< std::vector< char > > CacheData;

private:
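    /// A single cached entry: the compressed PNG data, the wire-id it was sent
    /// under, and a hit count used by balanceCache() to pick eviction victims.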
    struct CacheEntry {
    private:
        size_t _hitCount;
        TileWireId _wireId;
        CacheData _data;

    public:
        CacheEntry(const CacheData &data, TileWireId id) :
            _hitCount(1), // Every entry is used at least once; prevent removal at birth.
            _wireId(id),
            _data(data)
        {
        }

        size_t getHitCount() const
        {
            return _hitCount;
        }

        void incrementHitCount()
        {
            ++_hitCount;
        }

        void decrementHitCount()
        {
            --_hitCount;
        }

        const CacheData& getData() const
        {
            return _data;
        }

        TileWireId getWireId() const
        {
            return _wireId;
        }
    };

    size_t _cacheSize;
    static const size_t CacheSizeSoftLimit = (1024 * 4 * 32); // 128k of cache
    static const size_t CacheSizeHardLimit = CacheSizeSoftLimit * 2;
    static const size_t CacheWidHardLimit = 4096;
    size_t _cacheHits;
    size_t _cacheTests;
    TileWireId _nextId;

    std::unordered_map< TileBinaryHash, CacheEntry > _cache;
    // This uses little storage so can be much larger
    std::unordered_map< TileBinaryHash, TileWireId > _hashToWireId;

    void clearCache(bool logStats = false)
    {
        if (logStats)
            LOG_DBG("cache clear " << _cache.size() << " items total size " <<
                    _cacheSize << " current hits " << _cacheHits);
        _cache.clear();
        _hashToWireId.clear();
        _cacheSize = 0;
        _cacheHits = 0;
        _cacheTests = 0;
        _nextId = 1;
    }

    // Keep these ids small and wrap them.
    TileWireId createNewWireId()
    {
        TileWireId id = ++_nextId;
        // FIXME: if we wrap - we should flush the clients too really ...
        if (id < 1)
            clearCache(true);
        return id;
    }

public:
    // Performed only after a complete combinetiles
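    // Once the hard size limit is exceeded, entries that were never re-used are
    // evicted while above the soft limit, at-most-average entries while still
    // above the hard limit, and the survivors' hit counts are aged. The
    // hash -> wire-id map is trimmed separately once it outgrows CacheWidHardLimit.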
    void balanceCache()
    {
        // A normalish PNG image size for text in a writer document is
        // around 4k for a content tile, and sub 1k for a background one.
        if (_cacheSize > CacheSizeHardLimit)
        {
            size_t avgHits = 0;
            for (auto it = _cache.begin(); it != _cache.end(); ++it)
                avgHits += it->second.getHitCount();

            LOG_DBG("PNG cache has " << _cache.size() << " items, total size " <<
                    _cacheSize << ", current hits " << avgHits << ", total hit rate " <<
                    (_cacheHits * 100. / _cacheTests) << "% at balance start.");
            avgHits /= _cache.size();

            for (auto it = _cache.begin(); it != _cache.end();)
            {
                if ((_cacheSize > CacheSizeSoftLimit && it->second.getHitCount() == 0) ||
                    (_cacheSize > CacheSizeHardLimit && it->second.getHitCount() > 0 && it->second.getHitCount() <= avgHits))
                {
                    // Shrink cache when we exceed the size to maximize
                    // the chance of hitting these entries in the future.
                    _cacheSize -= it->second.getData()->size();
                    it = _cache.erase(it);
                }
                else
                {
                    if (it->second.getHitCount() > 0)
                        it->second.decrementHitCount();
                    ++it;
                }
            }

            LOG_DBG("PNG cache has " << _cache.size() << " items with total size of " <<
                    _cacheSize << " bytes after balance.");
        }

        if (_hashToWireId.size() > CacheWidHardLimit)
        {
            LOG_DBG("Clear half of wid cache of size " << _hashToWireId.size());
            TileWireId max = _nextId - CacheWidHardLimit/2;
            for (auto it = _hashToWireId.begin(); it != _hashToWireId.end();)
            {
                if (it->second < max)
                    it = _hashToWireId.erase(it);
                else
                    ++it;
            }
            LOG_DBG("Wid cache is now size " << _hashToWireId.size());
        }
    }

    /// Lookup an entry in the cache and store the data in output.
    /// Returns true on success, otherwise false.
    bool copyFromCache(const TileBinaryHash hash, std::vector<char>& output, size_t &imgSize)
    {
        if (hash)
        {
            ++_cacheTests;
            auto it = _cache.find(hash);
            if (it != _cache.end())
            {
                ++_cacheHits;
                LOG_TRC("PNG cache with hash " << hash << " hit.");
                output.insert(output.end(),
                              it->second.getData()->begin(),
                              it->second.getData()->end());
                it->second.incrementHitCount();
                imgSize = it->second.getData()->size();

                return true;
            }
        }

        LOG_TRC("PNG cache with hash " << hash << " missed.");
        return false;
    }

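    /// Insert a freshly compressed PNG into the cache, keyed by the hash of the
    /// tile pixels it was produced from; a zero (invalid) hash is not cached.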
    void addToCache(const CacheData &data, TileWireId wid, const TileBinaryHash hash)
    {
        CacheEntry newEntry(data, wid);

        if (hash)
        {
            // Adding duplicates causes grim wid mixups
            assert(hashToWireId(hash) == wid);
            assert(_cache.find(hash) == _cache.end());

            data->shrink_to_fit();
            _cache.emplace(hash, newEntry);
            _cacheSize += data->size();
        }
    }

    PngCache()
    {
        clearCache();
    }
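
    /// Map a tile pixel hash to a small, stable wire-id, allocating a new id the
    /// first time a hash is seen. A zero hash always maps to wire-id 0.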
    TileWireId hashToWireId(TileBinaryHash hash)
    {
        TileWireId wid;
        if (hash == 0)
            return 0;
        auto it = _hashToWireId.find(hash);
        if (it != _hashToWireId.end())
            wid = it->second;
        else
        {
            wid = createNewWireId();
            _hashToWireId.emplace(hash, wid);
        }
        return wid;
    }

    void dumpState(std::ostream& oss)
    {
        oss << "\tpngCache:"
            << "\n\t\tcacheSize: " << _cacheSize
            << "\n\t\tcacheHits: " << _cacheHits
            << "\n\t\tcacheTests: " << _cacheTests
            << "\n\t\tnextId: " << _nextId
            << "\n\t\tcache entry count: " << _cache.size();
        for (const auto &it : _cache)
        {
            oss << "\n\t\t\thash: " << it.first
                << " hitCount: " << it.second.getHitCount()
                << " wireId: " << it.second.getWireId();
        }
        oss << '\n';
    }
};
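
/// A small thread pool used to compress tiles to PNG in parallel. Jobs are
/// queued from a single producer thread via pushWork() and then executed by
/// run(), which wakes the workers, helps with the work itself, and blocks
/// until the queue has drained.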
class ThreadPool {
    std::mutex _mutex;
    std::condition_variable _cond;
    std::condition_variable _complete;
    typedef std::function<void()> ThreadFn;
    std::queue<ThreadFn> _work;
    std::vector<std::thread> _threads;
    size_t _working;
    bool _shutdown;
public:
    ThreadPool()
        : _working(0),
          _shutdown(false)
    {
        int maxConcurrency = 2;
#if MOBILEAPP && !defined(GTKAPP)
        maxConcurrency = std::max<int>(std::thread::hardware_concurrency(), 2);
#else
        const char *max = getenv("MAX_CONCURRENCY");
        if (max)
            maxConcurrency = atoi(max);
#endif
        LOG_TRC("PNG compression thread pool size " << maxConcurrency);
        for (int i = 1; i < maxConcurrency; ++i)
            _threads.push_back(std::thread(&ThreadPool::work, this));
    }

    ~ThreadPool()
    {
        {
            std::unique_lock< std::mutex > lock(_mutex);
            assert(_working == 0);
            _shutdown = true;
        }
        _cond.notify_all();
        for (auto &it : _threads)
            it.join();
    }

    size_t count() const
    {
        return _work.size();
    }
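
    /// Queue a job for the next run(). Only called between runs (nothing may be
    /// in flight, hence the assert), but the queue is still locked: workers can
    /// still be waking up from the previous run()'s notification.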
    void pushWork(const ThreadFn &fn)
    {
        std::unique_lock< std::mutex > lock(_mutex);
        assert(_working == 0);
        _work.push(fn);
    }
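
    /// Pop and execute a single job. The lock is dropped while the job runs and
    /// _complete is notified once the queue is empty and nothing is in flight.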
    void runOne(std::unique_lock< std::mutex >& lock)
    {
        assert(!_work.empty());

        ThreadFn fn = _work.front();
        _work.pop();
        _working++;
        lock.unlock();

        try {
            fn();
        } catch(...) {
            LOG_ERR("Exception in thread pool execution.");
        }

        lock.lock();
        _working--;
        if (_work.empty() && _working == 0)
            _complete.notify_all();
    }
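
    /// Drain the queue from the producer thread: wake the workers when there is
    /// enough work to share, execute jobs on this thread as well, then wait for
    /// all outstanding jobs to complete.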
    void run()
    {
        std::unique_lock< std::mutex > lock(_mutex);
        assert(_working == 0);

        // Avoid notifying threads if we don't need to.
        bool useThreads = _threads.size() > 1 && _work.size() > 1;
        if (useThreads)
            _cond.notify_all();

        while (!_work.empty())
            runOne(lock);

        if (useThreads && (_working > 0 || !_work.empty()))
            _complete.wait(lock, [this]() { return _working == 0 && _work.empty(); });

        assert(_working == 0);
        assert(_work.empty());
    }
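
    /// Worker thread entry point: sleep on _cond and execute jobs until shutdown.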
    void work()
    {
        std::unique_lock< std::mutex > lock(_mutex);
        while (!_shutdown)
        {
            _cond.wait(lock);
            if (!_shutdown && !_work.empty())
                runOne(lock);
        }
    }

    void dumpState(std::ostream& oss)
    {
        oss << "\tthreadPool:"
            << "\n\t\tshutdown: " << _shutdown
            << "\n\t\tworking: " << _working
            << "\n\t\twork count: " << count()
            << "\n\t\tthread count " << _threads.size()
            << "\n";
    }
};

namespace RenderTiles
{
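    /// A minimal RAII wrapper around the RGBA pixmap that the combined tile
    /// area is rendered into (4 bytes per pixel, zero-initialised).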
    struct Buffer {
        unsigned char *_data;
        Buffer()
        {
            _data = nullptr;
        }
        Buffer(size_t x, size_t y) :
            Buffer()
        {
            allocate(x, y);
        }
        void allocate(size_t x, size_t y)
        {
            assert(!_data);
            _data = static_cast<unsigned char *>(calloc(x * y, 4));
        }
        ~Buffer()
        {
            if (_data)
                free (_data);
        }
        unsigned char *data() { return _data; }
    };
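
    // Record a rendered tile together with the wire-id and compressed image
    // size that will be reported to the client for it.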
    static void pushRendered(std::vector<TileDesc> &renderedTiles,
                             const TileDesc &desc, TileWireId wireId, size_t imgSize)
    {
        renderedTiles.push_back(desc);
        renderedTiles.back().setWireId(wireId);
        renderedTiles.back().setImgSize(imgSize);
    }
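
    /// Render a 'tilecombine:' request: paint the whole combined area once via
    /// paintPartTile(), hash each tile sub-rectangle to skip tiles the client
    /// already has, reuse PNGs from the cache where possible, compress the rest
    /// in parallel on the PNG thread pool, and finally hand the serialized tile
    /// message(s) plus payload to outputMessage. Returns false if there were no
    /// tiles to process.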
    bool doRender(std::shared_ptr<lok::Document> document,
                  TileCombined &tileCombined,
                  PngCache &pngCache,
                  ThreadPool &pngPool,
                  bool combined,
                  const std::function<void (unsigned char *data,
                                            int offsetX, int offsetY,
                                            size_t pixmapWidth, size_t pixmapHeight,
                                            int pixelWidth, int pixelHeight,
                                            LibreOfficeKitTileMode mode)>& blendWatermark,
                  const std::function<void (const char *buffer, size_t length)>& outputMessage,
                  unsigned mobileAppDocId)
    {
        const auto& tiles = tileCombined.getTiles();

        // Calculate the area we cover
        Util::Rectangle renderArea;
        std::vector<Util::Rectangle> tileRecs;
        tileRecs.reserve(tiles.size());

        for (const auto& tile : tiles)
        {
            Util::Rectangle rectangle(tile.getTilePosX(), tile.getTilePosY(),
                                      tileCombined.getTileWidth(), tileCombined.getTileHeight());

            if (tileRecs.empty())
            {
                renderArea = rectangle;
            }
            else
            {
                renderArea.extend(rectangle);
            }

            tileRecs.push_back(rectangle);
        }

        assert(tiles.size() == tileRecs.size());

        const size_t tilesByX = renderArea.getWidth() / tileCombined.getTileWidth();
        const size_t tilesByY = renderArea.getHeight() / tileCombined.getTileHeight();
        const int pixelWidth = tileCombined.getWidth();
        const int pixelHeight = tileCombined.getHeight();
        const size_t pixmapWidth = tilesByX * pixelWidth;
        const size_t pixmapHeight = tilesByY * pixelHeight;

        if (pixmapWidth > 4096 || pixmapHeight > 4096)
            LOG_WRN("Unusual extremely large tile combine of size " << pixmapWidth << 'x' << pixmapHeight);

        RenderTiles::Buffer pixmap(pixmapWidth, pixmapHeight);

        // Render the whole area
        const double area = pixmapWidth * pixmapHeight;
        const auto start = std::chrono::steady_clock::now();
        LOG_TRC("Calling paintPartTile(" << (void*)pixmap.data() << ')');
        document->paintPartTile(pixmap.data(),
                                tileCombined.getPart(),
                                pixmapWidth, pixmapHeight,
                                renderArea.getLeft(), renderArea.getTop(),
                                renderArea.getWidth(), renderArea.getHeight());
        auto duration = std::chrono::steady_clock::now() - start;
        const auto elapsedMs = std::chrono::duration_cast<std::chrono::milliseconds>(duration);
        const double elapsedMics = elapsedMs.count() * 1000.; // Need MPixels/sec, use Pixels/mics.
        LOG_DBG("paintPartTile at ("
                << renderArea.getLeft() << ", " << renderArea.getTop() << "), ("
                << renderArea.getWidth() << ", " << renderArea.getHeight() << ") "
                << " rendered in " << elapsedMs << " (" << area / elapsedMics << " MP/s).");

        (void) mobileAppDocId;

        const auto mode = static_cast<LibreOfficeKitTileMode>(document->getTileMode());

        const size_t pixmapSize = 4 * pixmapWidth * pixmapHeight;
        std::vector<char> output;
        output.reserve(pixmapSize);

        // Compress the area as tiles
        std::vector<TileDesc> renderedTiles;
        std::vector<TileDesc> duplicateTiles;
        std::vector<TileBinaryHash> duplicateHashes;
        std::vector<TileWireId> renderingIds;

        size_t tileIndex = 0;

        std::mutex pngMutex;

        for (const Util::Rectangle& tileRect : tileRecs)
        {
            const size_t positionX = (tileRect.getLeft() - renderArea.getLeft()) / tileCombined.getTileWidth();
            const size_t positionY = (tileRect.getTop() - renderArea.getTop()) / tileCombined.getTileHeight();

            const int offsetX = positionX * pixelWidth;
            const int offsetY = positionY * pixelHeight;
            blendWatermark(pixmap.data(), offsetX, offsetY,
                           pixmapWidth, pixmapHeight,
                           pixelWidth, pixelHeight,
                           mode);

            const uint64_t hash = Png::hashSubBuffer(pixmap.data(), offsetX, offsetY,
                                                     pixelWidth, pixelHeight, pixmapWidth, pixmapHeight);

            TileWireId wireId = pngCache.hashToWireId(hash);
            TileWireId oldWireId = tiles[tileIndex].getOldWireId();
            if (hash != 0 && oldWireId == wireId)
            {
                // The tile content is identical to what the client already has, so skip it
                LOG_TRC("Match for tile #" << tileIndex << " at (" << positionX << ',' <<
                        positionY << ") oldhash==hash (" << hash << "), wireId: " << wireId << " skipping");
                // Push a zero byte image to inform WSD we didn't need that.
                // This allows WSD side TileCache to free up waiting subscribers.
                pushRendered(renderedTiles, tiles[tileIndex], wireId, 0);
                tileIndex++;
                continue;
            }

            bool skipCompress = false;
            size_t imgSize = -1;
            if (pngCache.copyFromCache(hash, output, imgSize))
            {
                pushRendered(renderedTiles, tiles[tileIndex], wireId, imgSize);
                skipCompress = true;
            }
            else
            {
                LOG_TRC("PNG cache with hash " << hash << " missed.");

                // Don't re-compress the same thing multiple times.
                for (const auto& id : renderingIds)
                {
                    if (wireId == id)
                    {
                        pushRendered(duplicateTiles, tiles[tileIndex], wireId, 0);
                        duplicateHashes.push_back(hash);
                        skipCompress = true;
                        LOG_TRC("Rendering duplicate tile #" << tileIndex << " at (" << positionX << ',' <<
                                positionY << ") oldhash==hash (" << hash << "), wireId: " << wireId << " skipping");
                        break;
                    }
                }
            }

            if (!skipCompress)
            {
                renderingIds.push_back(wireId);

                // Queue to be executed later in parallel inside 'run'
                pngPool.pushWork([=,&output,&pixmap,&tiles,&renderedTiles,&pngCache,&pngMutex](){

                        PngCache::CacheData data(new std::vector< char >() );
                        data->reserve(pixmapWidth * pixmapHeight * 1);

                        LOG_TRC("Encode a new png for tile #" << tileIndex);
                        if (!Png::encodeSubBufferToPNG(pixmap.data(), offsetX, offsetY, pixelWidth, pixelHeight,
                                                       pixmapWidth, pixmapHeight, *data, mode))
                        {
                            // FIXME: Return error.
                            // sendTextFrameAndLogError("error: cmd=tile kind=failure");
                            LOG_ERR("Failed to encode tile into PNG.");
                            return;
                        }

                        LOG_TRC("Tile " << tileIndex << " is " << data->size() << " bytes.");
                        std::unique_lock<std::mutex> pngLock(pngMutex);
                        output.insert(output.end(), data->begin(), data->end());
                        pngCache.addToCache(data, wireId, hash);
                        pushRendered(renderedTiles, tiles[tileIndex], wireId, data->size());
                    });
            }

            LOG_TRC("Encoded tile #" << tileIndex << " at (" << positionX << ',' << positionY << ") with oldWireId=" <<
                    tiles[tileIndex].getOldWireId() << ", hash=" << hash << " wireId: " << wireId << " in " << imgSize << " bytes.");
            tileIndex++;
        }

        // empty ones come first
        size_t zeroCheckStart = renderedTiles.size();

        pngPool.run();

        for (size_t i = zeroCheckStart; i < renderedTiles.size(); ++i)
        {
            if (renderedTiles[i].getImgSize() == 0)
            {
                LOG_TRC("Encoded 0-sized tile in slot " << i);
                assert(!"0-sized tile encoded!");
            }
        }

        // FIXME: append duplicates - tragically for now as real duplicates
        // we should append these as
        {
            size_t imgSize = -1;
            assert(duplicateTiles.size() == duplicateHashes.size());
            for (size_t i = 0; i < duplicateTiles.size(); ++i)
            {
                if (pngCache.copyFromCache(duplicateHashes[i], output, imgSize))
                    pushRendered(renderedTiles, duplicateTiles[i],
                                 duplicateTiles[i].getWireId(), imgSize);
                else
                    LOG_ERR("Horror - tile disappeared while rendering! " << duplicateHashes[i]);
            }
        }

        pngCache.balanceCache();

        duration = std::chrono::steady_clock::now() - start;
        const auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(duration);
        LOG_DBG("rendering tiles at (" << renderArea.getLeft() << ", " << renderArea.getTop()
                << "), (" << renderArea.getWidth() << ", "
                << renderArea.getHeight() << ") "
                << " took " << elapsed << " (including the paintPartTile).");

        if (tileIndex == 0)
            return false;
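
        // The reply is the textual tile/tilecombine message followed by the
        // compressed PNG payload bytes accumulated in 'output'.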
        std::string tileMsg;
        if (combined)
        {
            tileMsg = tileCombined.serialize("tilecombine:", ADD_DEBUG_RENDERID, renderedTiles);

            LOG_TRC("Sending back painted tiles (of size " << output.size() << " bytes) for: " << tileMsg);

            std::unique_ptr<char[]> response;
            const size_t responseSize = tileMsg.size() + output.size();
            response.reset(new char[responseSize]);
            std::copy(tileMsg.begin(), tileMsg.end(), response.get());
            std::copy(output.begin(), output.end(), response.get() + tileMsg.size());
            outputMessage(response.get(), responseSize);
        }
        else
        {
            size_t outputOffset = 0;
            for (auto &i : renderedTiles)
            {
                tileMsg = i.serialize("tile:", ADD_DEBUG_RENDERID);
                const size_t responseSize = tileMsg.size() + i.getImgSize();
                std::unique_ptr<char[]> response;
                response.reset(new char[responseSize]);
                std::copy(tileMsg.begin(), tileMsg.end(), response.get());
                std::copy(output.begin() + outputOffset, output.begin() + outputOffset + i.getImgSize(), response.get() + tileMsg.size());
                outputMessage(response.get(), responseSize);
                outputOffset += i.getImgSize();
            }
        }

        return true;
    }
}

/* vim:set shiftwidth=4 softtabstop=4 expandtab: */