mirror of https://github.com/vgough/encfs.git (synced 2024-11-21 23:43:26 +01:00)
cleanup lint warnings, run clang-format
parent e2e1fadfa9
commit 9089b8555f

@@ -168,7 +168,21 @@ if (${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} GREATER 3.5) # Need 3.6 or abo
message(STATUS "clang-tidy not found.")
else()
message(STATUS "clang-tidy found: ${CLANG_TIDY_EXE}")
set(DO_CLANG_TIDY "${CLANG_TIDY_EXE}" "-checks=*,-modernize-loop-convert,-cppcoreguidelines-pro-*,-readability-inconsistent-declaration-parameter-name,-google-readability-casting,-cert-err58-cpp,-google-runtime-int,-readability-named-parameter,-google-build-using-namespace,-misc-unused-parameters,-google-runtime-references")
string(CONCAT TIDY_OPTS "-checks=*"
",-cert-err58-cpp"
",-cppcoreguidelines-pro-*"
",-google-build-using-namespace"
",-google-readability-casting"
",-google-readability-todo"
",-google-runtime-int"
",-google-runtime-references"
",-misc-misplaced-widening-cast"
",-misc-unused-parameters"
",-modernize-loop-convert"
",-readability-inconsistent-declaration-parameter-name"
",-readability-named-parameter"
)
set(DO_CLANG_TIDY "${CLANG_TIDY_EXE}" ${TIDY_OPTS})
#set(DO_CLANG_TIDY "${CLANG_TIDY_EXE}" "-fix" "-checks=-*,google-readability-redundant-smartptr-get")
endif()
else()

@@ -121,7 +121,8 @@ ssize_t BlockFileIO::cacheWriteOneBlock(const IORequest &req) {
ssize_t BlockFileIO::read(const IORequest &req) const {
CHECK(_blockSize != 0);
int partialOffset = req.offset % _blockSize; //can be int as _blockSize is int
int partialOffset =
req.offset % _blockSize; // can be int as _blockSize is int
off_t blockNum = req.offset / _blockSize;
ssize_t result = 0;

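Aside: the partialOffset / blockNum pair above is the usual split of a byte offset into block coordinates; the comment notes the remainder fits in an int because _blockSize is an int. A minimal illustrative sketch (names are hypothetical, not the encfs API):

```cpp
#include <sys/types.h>

// Illustrative only: mirrors the offset arithmetic in BlockFileIO::read/write.
// blockSize is a positive int, so the remainder always fits in an int.
struct BlockPos {
  off_t blockNum;     // block containing the first requested byte
  int partialOffset;  // offset of that byte within the block
};

static BlockPos splitOffset(off_t offset, int blockSize) {
  return {offset / blockSize, static_cast<int>(offset % blockSize)};
}
```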
@@ -159,7 +160,9 @@ ssize_t BlockFileIO::read(const IORequest &req) const {
result = readSize;
break;
}
if (readSize <= partialOffset) break; // didn't get enough bytes
if (readSize <= partialOffset) {
break; // didn't get enough bytes
}
size_t cpySize = min((size_t)(readSize - partialOffset), size);
CHECK(cpySize <= readSize);

@@ -200,7 +203,8 @@ ssize_t BlockFileIO::write(const IORequest &req) {
// where write request begins
off_t blockNum = req.offset / _blockSize;
int partialOffset = req.offset % _blockSize; //can be int as _blockSize is int
int partialOffset =
req.offset % _blockSize; // can be int as _blockSize is int
// last block of file (for testing write overlaps with file boundary)
off_t lastFileBlock = fileSize / _blockSize;

@@ -267,7 +271,7 @@ ssize_t BlockFileIO::write(const IORequest &req) {
if (blockNum > lastNonEmptyBlock) {
// just pad..
blockReq.dataLen = toCopy + partialOffset;
blockReq.dataLen = partialOffset + toCopy;
} else {
// have to merge with existing block data..
blockReq.dataLen = _blockSize;

@@ -379,7 +383,7 @@ int BlockFileIO::padFile(off_t oldSize, off_t newSize, bool forceWrite) {
}
// 3. only necessary if write is forced and block is non 0 length
if ((res >= 0) && forceWrite && newBlockSize) {
if ((res >= 0) && forceWrite && (newBlockSize != 0)) {
req.offset = newLastBlock * _blockSize;
req.dataLen = newBlockSize;
memset(mb.data, 0, req.dataLen);

@@ -166,8 +166,9 @@ int BlockNameIO::encodeName(const char *plaintextName, int length, uint64_t *iv,
bool ok;
ok = _cipher->blockEncode((unsigned char *)encodedName + 2, length + padding,
(uint64_t)mac ^ tmpIV, _key);
if (!ok)
if (!ok) {
throw Error("block encode failed in filename encode");
}
// convert to base 64 ascii
int encodedStreamLen = length + 2 + padding;

@@ -225,8 +226,9 @@ int BlockNameIO::decodeName(const char *encodedName, int length, uint64_t *iv,
bool ok;
ok = _cipher->blockDecode((unsigned char *)tmpBuf + 2, decodedStreamLen,
(uint64_t)mac ^ tmpIV, _key);
if (!ok)
if (!ok) {
throw Error("block decode failed in filename decode");
}
// find out true string length
int padding = (unsigned char)tmpBuf[2 + decodedStreamLen - 1];

@@ -106,11 +106,10 @@ bool CipherFileIO::setIV(uint64_t iv) {
// duh -- there are no file headers for directories!
externalIV = iv;
return base->setIV(iv);
} else {
}
VLOG(1) << "setIV failed to re-open for write";
return false;
}
}
if (fileIV == 0) {
if (initHeader() < 0) {
return false;

@@ -266,11 +265,7 @@ bool CipherFileIO::writeHeader() {
req.data = buf;
req.dataLen = 8;
if (base->write(req) < 0) {
return false;
}
return true;
return (base->write(req) >= 0);
}
/**

@@ -319,8 +314,9 @@ int CipherFileIO::generateReverseHeader(unsigned char *headerBuf) {
VLOG(1) << "fileIV=" << fileIV;
// Encrypt externally-visible header
if(!cipher->streamEncode(headerBuf, HEADER_SIZE, externalIV, key))
if (!cipher->streamEncode(headerBuf, HEADER_SIZE, externalIV, key)) {
return -EBADMSG;
}
return 0;
}

@@ -352,9 +348,13 @@ ssize_t CipherFileIO::readOneBlock(const IORequest &req) const {
if (readSize != bs) {
VLOG(1) << "streamRead(data, " << readSize << ", IV)";
ok = streamRead(tmpReq.data, (int)readSize, blockNum ^ fileIV); //cast works because we work on a block and blocksize fit an int
ok = streamRead(tmpReq.data, (int)readSize,
blockNum ^ fileIV); // cast works because we work on a
// block and blocksize fit an int
} else {
ok = blockRead(tmpReq.data, (int)readSize, blockNum ^ fileIV); //cast works because we work on a block and blocksize fit an int
ok = blockRead(tmpReq.data, (int)readSize,
blockNum ^ fileIV); // cast works because we work on a
// block and blocksize fit an int
}
if (!ok) {

@@ -389,9 +389,13 @@ ssize_t CipherFileIO::writeOneBlock(const IORequest &req) {
bool ok;
if (req.dataLen != bs) {
ok = streamWrite(req.data, (int)req.dataLen, blockNum ^ fileIV); //cast works because we work on a block and blocksize fit an int
ok = streamWrite(req.data, (int)req.dataLen,
blockNum ^ fileIV); // cast works because we work on a
// block and blocksize fit an int
} else {
ok = blockWrite(req.data, (int)req.dataLen, blockNum ^ fileIV); //cast works because we work on a block and blocksize fit an int
ok = blockWrite(req.data, (int)req.dataLen,
blockNum ^ fileIV); // cast works because we work on a
// block and blocksize fit an int
}
ssize_t res = 0;

@@ -400,8 +404,9 @@ ssize_t CipherFileIO::writeOneBlock(const IORequest &req) {
IORequest tmpReq = req;
tmpReq.offset += HEADER_SIZE;
res = base->write(tmpReq);
} else
} else {
res = base->write(req);
}
} else {
VLOG(1) << "encodeBlock failed for block " << blockNum << ", size "
<< req.dataLen;

@@ -536,7 +541,8 @@ ssize_t CipherFileIO::read(const IORequest &origReq) const {
VLOG(1) << "Adding " << headerBytes << " header bytes";
// copy the header bytes into the data
int headerOffset = HEADER_SIZE - headerBytes; //can be int as HEADER_SIZE is int
int headerOffset =
HEADER_SIZE - headerBytes; // can be int as HEADER_SIZE is int
memcpy(req.data, &headerBuf[headerOffset], headerBytes);
// the read does not want data beyond the header

@@ -559,7 +565,8 @@ ssize_t CipherFileIO::read(const IORequest &origReq) const {
if (readBytes < 0) {
return readBytes; // Return error code
}
ssize_t sum = headerBytes + readBytes; //could be size_t, but as we return ssize_t...
ssize_t sum =
headerBytes + readBytes; // could be size_t, but as we return ssize_t...
VLOG(1) << "returning sum=" << sum;
return sum;
}

@@ -111,7 +111,8 @@ void EncFS_Context::renameNode(const char *from, const char *to) {
// putNode stores "node" under key "path" in the "openFiles" map. It
// increments the reference count if the key already exists.
void EncFS_Context::putNode(const char *path, std::shared_ptr<FileNode> node) {
void EncFS_Context::putNode(const char *path,
const std::shared_ptr<FileNode> &node) {
Lock lock(contextMutex);
auto &list = openFiles[std::string(path)];
// The length of "list" serves as the reference count.

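Aside: as the comments above say, openFiles maps a path to a list of nodes and the list length doubles as the reference count. A minimal sketch of that structure (illustrative, not the exact Context.cpp code):

```cpp
#include <list>
#include <map>
#include <memory>
#include <string>

struct FileNode;  // stand-in for the real encfs FileNode

// Path -> open nodes; list.size() is the per-path reference count.
std::map<std::string, std::list<std::shared_ptr<FileNode>>> openFiles;

void putNode(const std::string &path, const std::shared_ptr<FileNode> &node) {
  auto &list = openFiles[path];  // creates an empty list on first open
  list.push_front(node);         // one more reference to this path
}
```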
@@ -122,7 +123,7 @@ void EncFS_Context::putNode(const char *path, std::shared_ptr<FileNode> node) {
// eraseNode is called by encfs_release in response to the RELEASE
// FUSE-command we get from the kernel.
void EncFS_Context::eraseNode(const char *path,
std::shared_ptr<FileNode> fnode) {
const std::shared_ptr<FileNode> &fnode) {
Lock lock(contextMutex);
auto it = openFiles.find(std::string(path));

@@ -151,7 +152,7 @@ void EncFS_Context::eraseNode(const char *path,
// nextFuseFh returns the next unused uint64 to serve as the FUSE file
// handle for the kernel.
uint64_t EncFS_Context::nextFuseFh(void) {
uint64_t EncFS_Context::nextFuseFh() {
// This is thread-safe because currentFuseFh is declared as std::atomic
return currentFuseFh++;
}

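Aside: the thread-safety claim above rests on currentFuseFh being a std::atomic counter; a post-increment hands out unique handles without a lock. Tiny illustrative sketch (starting value assumed):

```cpp
#include <atomic>
#include <cstdint>

std::atomic<uint64_t> currentFuseFh{1};  // illustrative initial value

uint64_t nextFuseFh() { return currentFuseFh++; }  // atomic fetch-and-add
```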
@@ -48,9 +48,9 @@ class EncFS_Context {
void getAndResetUsageCounter(int *usage, int *openCount);
void putNode(const char *path, std::shared_ptr<FileNode> node);
void putNode(const char *path, const std::shared_ptr<FileNode> &node);
void eraseNode(const char *path, std::shared_ptr<FileNode> fnode);
void eraseNode(const char *path, const std::shared_ptr<FileNode> &fnode);
void renameNode(const char *oldName, const char *newName);

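Aside: these signature changes (and the matching ones in Context.cpp above) follow the usual clang-tidy guidance: take std::shared_ptr by const reference when the callee only observes the node, so no atomic reference-count copy is made. Illustrative sketch with a hypothetical type:

```cpp
#include <memory>

struct Node {};

// Copies the shared_ptr: bumps and later drops the atomic reference count.
void observeByValue(std::shared_ptr<Node> n) { (void)n; }

// Observes the same object without a copy; this is the form the
// header now declares for putNode/eraseNode.
void observeByConstRef(const std::shared_ptr<Node> &n) { (void)n; }
```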
@@ -149,7 +149,7 @@ class RenameOp {
~RenameOp();
operator bool() const { return renameList != nullptr; }
explicit operator bool() const { return renameList != nullptr; }
bool apply();
void undo();

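Aside: marking the conversion operator explicit keeps `if (op)` working while ruling out accidental implicit conversions to integers. A minimal sketch with a hypothetical type:

```cpp
struct Op {
  void *renameList = nullptr;
  explicit operator bool() const { return renameList != nullptr; }
};

// if (Op{}) { ... }   // still allowed: contextual conversion to bool
// int n = Op{};       // now a compile error instead of silently becoming 0
```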
@@ -206,7 +206,8 @@ ssize_t RawFileIO::read(const IORequest &req) const {
if (readSize < 0) {
int eno = errno;
errno = 0; //just to be sure error seen in integration tests really comes from the function rc.
errno = 0; // just to be sure error seen in integration tests really comes
// from the function rc.
RLOG(WARNING) << "read failed at offset " << req.offset << " for "
<< req.dataLen << " bytes: " << strerror(eno);
return -eno;

@@ -225,23 +226,25 @@ ssize_t RawFileIO::write(const IORequest &req) {
off_t offset = req.offset;
/*
* Let's write while pwrite() writes, to avoid writing only a part of the request,
* whereas it could have been fully written. This to avoid inconsistencies / corruption.
* Let's write while pwrite() writes, to avoid writing only a part of the
* request,
* whereas it could have been fully written. This to avoid inconsistencies /
* corruption.
*/
// while ((bytes != 0) && retrys > 0) {
while (bytes != 0) {
ssize_t writeSize = ::pwrite(fd, buf, bytes, offset);
if (writeSize <= 0) {
if (writeSize < 0) {
int eno = errno;
errno = 0; //just to be sure error seen in integration tests really comes from the function rc.
knownSize = false;
RLOG(WARNING) << "write failed at offset " << offset << " for " << bytes
<< " bytes: " << strerror(eno);
// pwrite is not expected to return 0, so eno should always be set, but we never know...
if (eno) {
// pwrite is not expected to return 0, so eno should always be set, but we
// never know...
return -eno;
}
if (writeSize == 0) {
return -EIO;
}

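Aside: the comment block above describes the keep-writing-while-pwrite-makes-progress pattern. A minimal standalone sketch of that loop (not the exact RawFileIO code; error values assumed):

```cpp
#include <cerrno>
#include <sys/types.h>
#include <unistd.h>

// Keep writing while pwrite() makes progress, so a short write does not
// end up persisted as a partial (possibly corrupting) request.
ssize_t writeAll(int fd, const unsigned char *buf, size_t bytes, off_t offset) {
  while (bytes != 0) {
    ssize_t n = ::pwrite(fd, buf, bytes, offset);
    if (n < 0) {
      return -errno;  // real error reported by the kernel
    }
    if (n == 0) {
      return -EIO;  // no progress and no errno: treat as I/O error
    }
    buf += n;
    bytes -= static_cast<size_t>(n);
    offset += n;
  }
  return 0;
}
```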
@@ -251,7 +254,8 @@ ssize_t RawFileIO::write(const IORequest &req) {
}
// if (bytes != 0) {
// RLOG(ERROR) << "Write error: wrote " << req.dataLen - bytes << " bytes of "
// RLOG(ERROR) << "Write error: wrote " << req.dataLen - bytes << " bytes of
// "
// << req.dataLen << ", max retries reached";
// knownSize = false;
// return (eno) ? -eno : -EIO;

@@ -106,7 +106,7 @@ int BytesToKey(int keyLen, int ivLen, const EVP_MD *md,
memcpy(iv, mdBuf + offset, toCopy);
iv += toCopy;
niv -= toCopy;
offset += toCopy;
// offset += toCopy;
}
if ((nkey == 0) && (niv == 0)) {
break;

@@ -170,12 +170,14 @@ static Range CAMELLIABlockRange(64, 4096, 16);
static std::shared_ptr<Cipher> NewCAMELLIACipher(const Interface &iface,
int keyLen) {
if (keyLen <= 0) keyLen = 192;
if (keyLen <= 0) {
keyLen = 192;
}
keyLen = CAMELLIAKeyRange.closest(keyLen);
const EVP_CIPHER *blockCipher = 0;
const EVP_CIPHER *streamCipher = 0;
const EVP_CIPHER *blockCipher = nullptr;
const EVP_CIPHER *streamCipher = nullptr;
switch (keyLen) {
case 128:

@@ -503,7 +505,7 @@ CipherKey SSL_Cipher::newRandomKey() {
compute a 64-bit check value for the data using HMAC.
*/
static uint64_t _checksum_64(SSLKey *key, const unsigned char *data,
int dataLen, uint64_t *chainedIV) {
int dataLen, uint64_t *const chainedIV) {
rAssert(dataLen > 0);
Lock lock(key->mutex);

@@ -103,7 +103,7 @@ bool XmlValue::readB64(const char *path, unsigned char *data,
std::string s = value->text();
s.erase(std::remove_if(s.begin(), s.end(), ::isspace), s.end());
s.erase(s.find_last_not_of("=") + 1);
s.erase(s.find_last_not_of('=') + 1);
int decodedSize = B64ToB256Bytes(s.size());
if (decodedSize != length) {

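Aside: the two erase() calls above are the standard erase-remove idiom plus a trim of trailing '=' padding before sizing the base64 decode. Illustrative helper (hypothetical name):

```cpp
#include <algorithm>
#include <cctype>
#include <string>

std::string cleanBase64(std::string s) {
  // drop embedded whitespace...
  s.erase(std::remove_if(s.begin(), s.end(),
                         [](unsigned char c) { return std::isspace(c) != 0; }),
          s.end());
  // ...and trailing '=' padding, as XmlValue::readB64 does above
  s.erase(s.find_last_not_of('=') + 1);
  return s;
}
```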
@@ -25,13 +25,13 @@
#include <cstring>
#include <ctime>
#include <fcntl.h>
#include <limits>
#include <memory>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/time.h>
#include <unistd.h>
#include <utime.h>
#include <limits>
#ifdef linux
#include <sys/fsuid.h>
#endif

@@ -65,7 +65,7 @@ using namespace std::placeholders;
namespace encfs {
#define GET_FN(ctx, finfo) ctx->getNode((void *)(uintptr_t)finfo->fh)
#define GET_FN(ctx, finfo) (ctx)->getNode((void *)(uintptr_t)(finfo)->fh)
static EncFS_Context *context() {
return (EncFS_Context *)fuse_get_context()->private_data;

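Aside: the GET_FN change is the classic macro-hygiene fix: parenthesize macro parameters so argument expressions cannot rebind when expanded. Minimal illustration (hypothetical macros, not from encfs):

```cpp
#define BAD_SQUARE(x) x *x
#define GOOD_SQUARE(x) ((x) * (x))

static_assert(BAD_SQUARE(1 + 1) == 3, "expands to 1 + 1 * 1 + 1");
static_assert(GOOD_SQUARE(1 + 1) == 4, "expands to ((1 + 1) * (1 + 1))");
```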
@@ -79,8 +79,9 @@ static EncFS_Context *context() {
static bool isReadOnly(EncFS_Context *ctx) { return ctx->opts->readOnly; }
// helper function -- apply a functor to a cipher path, given the plain path
static int withCipherPath(const char *opName, const char *path,
function<int(EncFS_Context *, const string &)> op,
static int withCipherPath(
const char *opName, const char *path,
const function<int(EncFS_Context *, const string &)> &op,
bool passReturnCode = false) {
EncFS_Context *ctx = context();

@@ -110,7 +111,7 @@ static int withCipherPath(const char *opName, const char *path,
return res;
}
static void checkCanary(std::shared_ptr<FileNode> fnode) {
static void checkCanary(const std::shared_ptr<FileNode> &fnode) {
if (fnode->canary == CANARY_OK) {
return;
}

@@ -693,7 +694,8 @@ ssize_t _do_read(FileNode *fnode, unsigned char *ptr, size_t size, off_t off) {
int encfs_read(const char *path, char *buf, size_t size, off_t offset,
struct fuse_file_info *file) {
//Unfortunately we have to convert from ssize_t (pread) to int (fuse), so let's check this will be OK
// Unfortunately we have to convert from ssize_t (pread) to int (fuse), so
// let's check this will be OK
if (size > std::numeric_limits<int>::max()) {
RLOG(ERROR) << "tried to read too much data: " << size;
return -EIO;

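Aside: the check above guards the narrowing from the size_t request size (and the ssize_t pread/pwrite result) down to FUSE's int return type. Illustrative helper (hypothetical name):

```cpp
#include <cstddef>
#include <limits>

// True when a request of this size can be reported back through
// FUSE's int return value without overflow.
bool fitsInFuseReturn(std::size_t size) {
  return size <= static_cast<std::size_t>(std::numeric_limits<int>::max());
}
```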
@@ -714,13 +716,15 @@ int encfs_fsync(const char *path, int dataSync, struct fuse_file_info *file) {
return withFileNode("fsync", path, file, bind(_do_fsync, _1, dataSync));
}
ssize_t _do_write(FileNode *fnode, unsigned char *ptr, size_t size, off_t offset) {
ssize_t _do_write(FileNode *fnode, unsigned char *ptr, size_t size,
off_t offset) {
return fnode->write(offset, ptr, size);
}
int encfs_write(const char *path, const char *buf, size_t size, off_t offset,
struct fuse_file_info *file) {
//Unfortunately we have to convert from ssize_t (pwrite) to int (fuse), so let's check this will be OK
// Unfortunately we have to convert from ssize_t (pwrite) to int (fuse), so
// let's check this will be OK
if (size > std::numeric_limits<int>::max()) {
RLOG(ERROR) << "tried to write too much data: " << size;
return -EIO;