From 2f7ac98e34e07f1014f546895b4136add336624e Mon Sep 17 00:00:00 2001 From: David Schwartz Date: Thu, 20 Feb 2014 09:30:21 -0800 Subject: [PATCH] Improve I/O when fetching ledgers * negative cache for node store * async fetch, thread pool for node store * read barrier logic for node store * SHAMap getMissingNodesNB (non-blocking) * non-blocking getMissingNodes traverse * tune caches --- src/ripple/common/KeyCache.h | 12 + src/ripple_app/main/Application.cpp | 4 +- src/ripple_app/main/Tuning.h | 2 +- src/ripple_app/shamap/SHAMap.cpp | 72 +++++- src/ripple_app/shamap/SHAMap.h | 4 + src/ripple_app/shamap/SHAMapSync.cpp | 135 ++++++---- src/ripple_core/functional/Config.cpp | 4 +- src/ripple_core/nodestore/NodeStore.cpp | 1 + src/ripple_core/nodestore/api/Database.h | 17 ++ src/ripple_core/nodestore/api/Manager.h | 3 +- src/ripple_core/nodestore/impl/DatabaseImp.h | 236 +++++++++++++----- src/ripple_core/nodestore/impl/Manager.cpp | 4 +- .../nodestore/tests/DatabaseTests.cpp | 12 +- 13 files changed, 370 insertions(+), 136 deletions(-) diff --git a/src/ripple/common/KeyCache.h b/src/ripple/common/KeyCache.h index 592078d76..fd78d8c28 100644 --- a/src/ripple/common/KeyCache.h +++ b/src/ripple/common/KeyCache.h @@ -157,6 +157,18 @@ public: m_map.clear (); } + void setTargetSize (size_type s) + { + lock_guard lock (m_mutex); + m_target_size = s; + } + + void setTargetAge (size_type s) + { + lock_guard lock (m_mutex); + m_target_age = std::chrono::seconds (s); + } + /** Returns `true` if the key was found. Does not update the last access time. */ diff --git a/src/ripple_app/main/Application.cpp b/src/ripple_app/main/Application.cpp index 9584b4049..fb7b2385c 100644 --- a/src/ripple_app/main/Application.cpp +++ b/src/ripple_app/main/Application.cpp @@ -280,7 +280,7 @@ public: , m_rpcServerHandler (*m_networkOPs, *m_resourceManager) // passive object, not a Service , m_nodeStore (m_nodeStoreManager->make_Database ("NodeStore.main", m_nodeStoreScheduler, - LogPartition::getJournal (), + LogPartition::getJournal (), 4, // four read threads for now getConfig ().nodeDatabase, getConfig ().ephemeralNodeDatabase)) , m_sntpClient (SNTPClient::New (*this)) @@ -1356,7 +1356,7 @@ void ApplicationImp::updateTables () NodeStore::DummyScheduler scheduler; std::unique_ptr source ( m_nodeStoreManager->make_Database ("NodeStore.import", scheduler, - LogPartition::getJournal (), + LogPartition::getJournal (), 0, getConfig ().importNodeDatabase)); WriteLog (lsWARNING, NodeObject) << diff --git a/src/ripple_app/main/Tuning.h b/src/ripple_app/main/Tuning.h index 632449103..fe70614b8 100644 --- a/src/ripple_app/main/Tuning.h +++ b/src/ripple_app/main/Tuning.h @@ -26,7 +26,7 @@ enum { fullBelowTargetSize = 524288 - ,fullBelowExpirationSeconds = 240 + ,fullBelowExpirationSeconds = 600 }; } diff --git a/src/ripple_app/shamap/SHAMap.cpp b/src/ripple_app/shamap/SHAMap.cpp index b6f273e3a..53a40fa85 100644 --- a/src/ripple_app/shamap/SHAMap.cpp +++ b/src/ripple_app/shamap/SHAMap.cpp @@ -899,6 +899,76 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternal (const SHAMapNode& id, uint256 return ret; } +// Non-blocking version +SHAMapTreeNode* SHAMap::getNodeAsync ( + const SHAMapNode& id, + uint256 const& hash, + SHAMapSyncFilter *filter, + bool& pending) +{ + pending = false; + + // If the node is in mTNByID, return it + SHAMapTreeNode::pointer ptr = mTNByID.retrieve (id); + if (ptr) + return ptr.get (); + + // Try the tree node cache + ptr = getCache (hash, id); + + if (!ptr) + { + + // Try the filter + if (filter) + { + Blob 
nodeData; + if (filter->haveNode (id, hash, nodeData)) + { + ptr = boost::make_shared ( + boost::cref (id), boost::cref (nodeData), 0, snfPREFIX, boost::cref (hash), true); + filter->gotNode (true, id, hash, nodeData, ptr->getType ()); + } + } + + if (!ptr) + { + NodeObject::pointer obj; + + if (!getApp().getNodeStore().asyncFetch (hash, obj)) + { // We would have to block + pending = true; + assert (!obj); + return nullptr; + } + + if (!obj) + return nullptr; + + ptr = boost::make_shared (id, obj->getData(), 0, snfPREFIX, hash, true); + if (id != *ptr) + { + assert (false); + return nullptr; + } + } + + // Put it in the tree node cache + canonicalize (hash, ptr); + } + + if (id.isRoot ()) + { + // It is legal to replace the root + mTNByID.replace (id, ptr); + root = ptr; + } + else + mTNByID.canonicalize (id, &ptr); + + return ptr.get (); +} + /** Look at the cache and back end (things external to this SHAMap) to find a tree node. Only a read lock is required because mTNByID has its own, internal synchronization. Every thread calling this function must @@ -953,7 +1023,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2 return SHAMapTreeNode::pointer (); } - // Share this immutable tree node in thre TreeNodeCache + // Share this immutable tree node in the TreeNodeCache canonicalize (hash, ret); } catch (...) diff --git a/src/ripple_app/shamap/SHAMap.h b/src/ripple_app/shamap/SHAMap.h index 848a7c528..15c6f94ff 100644 --- a/src/ripple_app/shamap/SHAMap.h +++ b/src/ripple_app/shamap/SHAMap.h @@ -293,6 +293,10 @@ private: SHAMapTreeNode* firstBelow (SHAMapTreeNode*); SHAMapTreeNode* lastBelow (SHAMapTreeNode*); + // Non-blocking version of getNodePointerNT + SHAMapTreeNode* getNodeAsync ( + const SHAMapNode & id, uint256 const & hash, SHAMapSyncFilter * filter, bool& pending); + SHAMapItem::pointer onlyBelow (SHAMapTreeNode*); void eraseChildren (SHAMapTreeNode::pointer); void dropBelow (SHAMapTreeNode*); diff --git a/src/ripple_app/shamap/SHAMapSync.cpp b/src/ripple_app/shamap/SHAMapSync.cpp index 47254682d..ccba69df4 100644 --- a/src/ripple_app/shamap/SHAMapSync.cpp +++ b/src/ripple_app/shamap/SHAMapSync.cpp @@ -119,6 +119,7 @@ void SHAMap::getMissingNodes (std::vector& nodeIDs, std::vectorisValid ()); assert (root->getNodeHash().isNonZero ()); + if (root->isFullBelow ()) { clearSynching (); @@ -131,74 +132,102 @@ void SHAMap::getMissingNodes (std::vector& nodeIDs, std::vector stack; + // Track the missing hashes we have found so far + std::set missingHashes; - SHAMapTreeNode* node = root.get (); - int firstChild = rand() % 256; - int currentChild = 0; - bool fullBelow = true; - - do + while (1) { - while (currentChild < 16) + + std::stack stack; + int deferCount = 0; + + // Traverse the map without blocking + + SHAMapTreeNode *node = root.get (); + int firstChild = rand() % 256; + int currentChild = 0; + bool fullBelow = true; + + do { - int branch = (firstChild + ++currentChild) % 16; - if (!node->isEmptyBranch (branch)) + while (currentChild < 16) { - uint256 const& childHash = node->getChildHash (branch); - - if (! 
m_fullBelowCache.touch_if_exists (childHash)) + int branch = (firstChild + currentChild++) % 16; + if (!node->isEmptyBranch (branch)) { - SHAMapNode childID = node->getChildNodeID (branch); - SHAMapTreeNode* d = getNodePointerNT (childID, childHash, filter); + uint256 const& childHash = node->getChildHash (branch); - if (!d) - { // node is not in the database - nodeIDs.push_back (childID); - hashes.push_back (childHash); - - if (--max <= 0) - return; - - fullBelow = false; // This node is definitely not full below - } - else if (d->isInner () && !d->isFullBelow ()) + if (! m_fullBelowCache.touch_if_exists (childHash)) { - stack.push (GMNEntry( node, firstChild, currentChild, fullBelow)); + SHAMapNode childID = node->getChildNodeID (branch); + bool pending = false; + SHAMapTreeNode* d = getNodeAsync (childID, childHash, filter, pending); - // Switch to processing the child node - node = d; - firstChild = rand() % 256; - currentChild = 0; - fullBelow = true; + if (!d) + { + if (!pending) + { // node is not in the database + if (missingHashes.insert (childHash).second) + { + nodeIDs.push_back (childID); + hashes.push_back (childHash); + + if (--max <= 0) + return; + } + } + else + { + // read is deferred + ++deferCount; + } + + fullBelow = false; // This node is not known full below + } + else if (d->isInner () && !d->isFullBelow ()) + { + stack.push (GMNEntry (node, firstChild, currentChild, fullBelow)); + + // Switch to processing the child node + node = d; + firstChild = rand() % 256; + currentChild = 0; + fullBelow = true; + } } } } + + // We are done with this inner node (and thus all of its children) + + if (fullBelow) + { // No partial node encountered below this node + node->setFullBelow (); + if (mType == smtSTATE) + m_fullBelowCache.insert (node->getNodeHash ()); + } + + if (stack.empty ()) + node = NULL; // Finished processing the last node, we are done + else + { // Pick up where we left off (above this node) + GMNEntry& next = stack.top (); + node = next.node; + firstChild = next.firstChild; + currentChild = next.currentChild; + fullBelow = (fullBelow && next.fullBelow); // was and still is + stack.pop (); + } + } + while (node != NULL); - // We are done with this inner node (and thus all of its children) - - if (fullBelow) - { // No partial node encountered below this node - node->setFullBelow (); - if (mType == smtSTATE) - m_fullBelowCache.insert (node->getNodeHash ()); - } - - if (stack.empty ()) - node = NULL; // Finished processing the last node, we are done - else - { // Pick up where we left off (above this node) - GMNEntry& next = stack.top (); - node = next.node; - firstChild = next.firstChild; - currentChild = next.currentChild; - fullBelow = (fullBelow && next.fullBelow); // was and still is - stack.pop (); - } + // If we didn't defer any reads, we're done + if (deferCount == 0) + break; + getApp().getNodeStore().waitReads(); } - while (node != NULL); if (nodeIDs.empty ()) clearSynching (); diff --git a/src/ripple_core/functional/Config.cpp b/src/ripple_core/functional/Config.cpp index 3c6f48f57..4c16394e7 100644 --- a/src/ripple_core/functional/Config.cpp +++ b/src/ripple_core/functional/Config.cpp @@ -604,8 +604,8 @@ int Config::getSize (SizedItemName item) { siValidationsSize, { 256, 256, 512, 1024, 1024 } }, { siValidationsAge, { 500, 500, 500, 500, 500 } }, - { siNodeCacheSize, { 8192, 16384, 32768, 131072, 0 } }, - { siNodeCacheAge, { 30, 60, 90, 120, 900 } }, + { siNodeCacheSize, { 16384, 32768, 131072, 262144, 0 } }, + { siNodeCacheAge, { 60, 90, 120, 900, 0 } 
}, { siTreeCacheSize, { 8192, 65536, 131072, 131072, 0 } }, { siTreeCacheAge, { 30, 60, 90, 120, 900 } }, diff --git a/src/ripple_core/nodestore/NodeStore.cpp b/src/ripple_core/nodestore/NodeStore.cpp index b10c366fa..eed5b64f9 100644 --- a/src/ripple_core/nodestore/NodeStore.cpp +++ b/src/ripple_core/nodestore/NodeStore.cpp @@ -29,6 +29,7 @@ #include "../../ripple/common/seconds_clock.h" #include "../../ripple/common/TaggedCache.h" +#include "../../ripple/common/KeyCache.h" #include "impl/Tuning.h" # include "impl/DecodedBlob.h" diff --git a/src/ripple_core/nodestore/api/Database.h b/src/ripple_core/nodestore/api/Database.h index f202f7948..3e8bcf0c4 100644 --- a/src/ripple_core/nodestore/api/Database.h +++ b/src/ripple_core/nodestore/api/Database.h @@ -62,6 +62,23 @@ public: */ virtual NodeObject::pointer fetch (uint256 const& hash) = 0; + /** Fetch an object without waiting. + If I/O is required to determine whether or not the object is present, + `false` is returned. Otherwise, `true` is returned and `object` is set + to refer to the object, or `nullptr` if the object is not present. + If I/O is required, the I/O is scheduled. + + @note This can be called concurrently. + @param hash The key of the object to retrieve + @param object The object retrieved + @return Whether the operation completed + */ + virtual bool asyncFetch (uint256 const& hash, NodeObject::pointer& object) = 0; + + /** Wait for all currently pending async reads to complete. + */ + virtual void waitReads () = 0; + /** Store the object. The caller's Blob parameter is overwritten. diff --git a/src/ripple_core/nodestore/api/Manager.h b/src/ripple_core/nodestore/api/Manager.h index e0f9d85d6..3aba115b7 100644 --- a/src/ripple_core/nodestore/api/Manager.h +++ b/src/ripple_core/nodestore/api/Manager.h @@ -63,13 +63,14 @@ public: @param name A diagnostic label for the database. @param scheduler The scheduler to use for performing asynchronous tasks. + @param readThreads The number of async read threads to create @param backendParameters The parameter string for the persistent backend. @param fastBackendParameters [optional] The parameter string for the ephemeral backend. @return The opened database. */ virtual std::unique_ptr make_Database (std::string const& name, - Scheduler& scheduler, Journal journal, + Scheduler& scheduler, Journal journal, int readThreads, Parameters const& backendParameters, Parameters fastBackendParameters = Parameters ()) = 0; }; diff --git a/src/ripple_core/nodestore/impl/DatabaseImp.h b/src/ripple_core/nodestore/impl/DatabaseImp.h index ef1004f70..ed3f21be2 100644 --- a/src/ripple_core/nodestore/impl/DatabaseImp.h +++ b/src/ripple_core/nodestore/impl/DatabaseImp.h @@ -20,6 +20,9 @@ #ifndef RIPPLE_NODESTORE_DATABASEIMP_H_INCLUDED #define RIPPLE_NODESTORE_DATABASEIMP_H_INCLUDED +#include +#include + namespace ripple { namespace NodeStore { @@ -34,10 +37,25 @@ public: std::unique_ptr m_backend; // Larger key/value storage, but not necessarily persistent. 
std::unique_ptr m_fastBackend; + + // Positive cache TaggedCache m_cache; + // Negative cache + KeyCache m_negCache; + + std::mutex m_readLock; + std::condition_variable m_readCondVar; + std::condition_variable m_readGenCondVar; + std::set m_readSet; // set of reads to do + uint256 m_readLast; // last hash read + std::vector m_readThreads; + bool m_readShut; + uint64_t m_readGen; // current read generation + DatabaseImp (std::string const& name, Scheduler& scheduler, + int readThreads, std::unique_ptr backend, std::unique_ptr fastBackend, Journal journal) @@ -47,11 +65,26 @@ public: , m_fastBackend (std::move (fastBackend)) , m_cache ("NodeStore", cacheTargetSize, cacheTargetSeconds, get_seconds_clock (), LogPartition::getJournal ()) + , m_negCache ("NodeStore", get_seconds_clock (), + cacheTargetSize, cacheTargetSeconds) + , m_readShut (false) + , m_readGen (0) { + for (int i = 0; i < readThreads; ++i) + m_readThreads.push_back (std::thread (&DatabaseImp::threadEntry, this)); } ~DatabaseImp () { + { + std::unique_lock lock (m_readLock); + m_readShut = true; + m_readCondVar.notify_all (); + m_readGenCondVar.notify_all (); + } + + BOOST_FOREACH (std::thread& th, m_readThreads) + th.join (); } String getName () const @@ -59,76 +92,105 @@ public: return m_backend->getName (); } + //------------------------------------------------------------------------------ + bool asyncFetch (uint256 const& hash, NodeObject::pointer& object) + { + // See if the object is in cache + object = m_cache.fetch (hash); + if (object || m_negCache.touch_if_exists (hash)) + return true; + + { + // No. Post a read + std::unique_lock lock (m_readLock); + if (m_readSet.insert (hash).second) + m_readCondVar.notify_one (); + } + + return false; + } + + void waitReads () + { + { + std::unique_lock lock (m_readLock); + + // Wake in two generations + uint64 const wakeGeneration = m_readGen + 2; + + while (!m_readShut && !m_readSet.empty () && (m_readGen < wakeGeneration)) + m_readGenCondVar.wait (lock); + } + + } + NodeObject::Ptr fetch (uint256 const& hash) { // See if the object already exists in the cache // NodeObject::Ptr obj = m_cache.fetch (hash); + if (obj != nullptr) + return obj; + + if (m_negCache.touch_if_exists (hash)) + return obj; + + // Check the database(s). + + bool foundInFastBackend = false; + + // Check the fast backend database if we have one + // + if (m_fastBackend != nullptr) + { + obj = fetchInternal (*m_fastBackend, hash); + + // If we found the object, avoid storing it again later. + if (obj != nullptr) + foundInFastBackend = true; + } + + // Are we still without an object? + // if (obj == nullptr) { - // There's still a chance it could be in one of the databases. - - bool foundInFastBackend = false; - - // Check the fast backend database if we have one + // Yes so at last we will try the main database. // - if (m_fastBackend != nullptr) - { - obj = fetchInternal (*m_fastBackend, hash); + obj = fetchInternal (*m_backend, hash); + } - // If we found the object, avoid storing it again later. - if (obj != nullptr) - foundInFastBackend = true; - } + if (obj == nullptr) + { + + // Just in case a write occurred + obj = m_cache.fetch (hash); - // Are we still without an object? - // if (obj == nullptr) { - // Yes so at last we will try the main database. - // - { - // Monitor this operation's load since it is expensive. - // - // VFALCO TODO Why is this an autoptr? Why can't it just be a plain old object? - // - // VFALCO NOTE Commented this out because it breaks the unit test! 
- // - //LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve")); - - obj = fetchInternal (*m_backend, hash); - } - - } - - // Did we finally get something? - // - if (obj != nullptr) - { - // Yes it so canonicalize. This solves the problem where - // more than one thread has its own copy of the same object. - // - m_cache.canonicalize (hash, obj); - - if (! foundInFastBackend) - { - // If we have a fast back end, store it there for later. - // - if (m_fastBackend != nullptr) - m_fastBackend->store (obj); - - // Since this was a 'hard' fetch, we will log it. - // - WriteLog (lsTRACE, NodeObject) << "HOS: " << hash << " fetch: in db"; - } + // We give up + m_negCache.insert (hash); } } else { - // found it! + // Ensure all threads get the same object + // + m_cache.canonicalize (hash, obj); + + if (! foundInFastBackend) + { + // If we have a fast back end, store it there for later. + // + if (m_fastBackend != nullptr) + m_fastBackend->store (obj); + + // Since this was a 'hard' fetch, we will log it. + // + WriteLog (lsTRACE, NodeObject) << "HOS: " << hash << " fetch: in db"; + } } return obj; @@ -168,30 +230,20 @@ public: Blob& data, uint256 const& hash) { - bool const keyFoundAndObjectCached = m_cache.refreshIfPresent (hash); + NodeObject::Ptr object = NodeObject::createObject (type, index, data, hash); - // VFALCO NOTE What happens if the key is found, but the object - // fell out of the cache? We will end up passing it - // to the backend anyway. - // - if (! keyFoundAndObjectCached) - { #if RIPPLE_VERIFY_NODEOBJECT_KEYS - assert (hash == Serializer::getSHA512Half (data)); + assert (hash == Serializer::getSHA512Half (data)); #endif - NodeObject::Ptr object = NodeObject::createObject ( - type, index, data, hash); + m_cache.canonicalize (hash, object, true); - if (!m_cache.canonicalize (hash, object)) - { - m_backend->store (object); + m_backend->store (object); - if (m_fastBackend) - m_fastBackend->store (object); - } + m_negCache.erase (hash); - } + if (m_fastBackend) + m_fastBackend->store (object); } //------------------------------------------------------------------------------ @@ -205,11 +257,14 @@ public: { m_cache.setTargetSize (size); m_cache.setTargetAge (age); + m_negCache.setTargetSize (size); + m_negCache.setTargetAge (age); } void sweep () { m_cache.sweep (); + m_negCache.sweep (); } int getWriteLoad () @@ -219,6 +274,51 @@ public: //------------------------------------------------------------------------------ + // Entry point for async read threads + void threadEntry () + { + Thread::setCurrentThreadName ("prefetch"); + while (1) + { + uint256 hash; + + { + std::unique_lock lock (m_readLock); + + while (!m_readShut && m_readSet.empty ()) + { + // all work is done + m_readGenCondVar.notify_all (); + m_readCondVar.wait (lock); + } + + if (m_readShut) + break; + + // Read in key order to make the back end more efficient + std::set ::iterator it = m_readSet.lower_bound (m_readLast); + if (it == m_readSet.end ()) + { + it = m_readSet.begin (); + + // A generation has completed + ++m_readGen; + m_readGenCondVar.notify_all (); + } + + hash = *it; + m_readSet.erase (it); + m_readLast = hash; + } + + // Perform the read + fetch (hash); + } + } + + //------------------------------------------------------------------------------ + + void visitAll (VisitCallback& callback) { m_backend->visitAll (callback); diff --git a/src/ripple_core/nodestore/impl/Manager.cpp b/src/ripple_core/nodestore/impl/Manager.cpp index 72f7f4506..e7342f99e 100644 --- 
a/src/ripple_core/nodestore/impl/Manager.cpp
+++ b/src/ripple_core/nodestore/impl/Manager.cpp
@@ -109,7 +109,7 @@ public:
     }
 
     std::unique_ptr <Database> make_Database (std::string const& name,
-        Scheduler& scheduler, Journal journal,
+        Scheduler& scheduler, Journal journal, int readThreads,
         Parameters const& backendParameters,
         Parameters fastBackendParameters)
     {
@@ -121,7 +121,7 @@ public:
             ? make_Backend (fastBackendParameters, scheduler, journal)
             : nullptr);
 
-        return std::make_unique <DatabaseImp> (name, scheduler,
+        return std::make_unique <DatabaseImp> (name, scheduler, readThreads,
             std::move (backend), std::move (fastBackend), journal);
     }
 };
diff --git a/src/ripple_core/nodestore/tests/DatabaseTests.cpp b/src/ripple_core/nodestore/tests/DatabaseTests.cpp
index e461cef57..758fcf2cb 100644
--- a/src/ripple_core/nodestore/tests/DatabaseTests.cpp
+++ b/src/ripple_core/nodestore/tests/DatabaseTests.cpp
@@ -54,7 +54,7 @@ public:
         // Write to source db
         {
             std::unique_ptr <Database> src (manager->make_Database (
-                "test", scheduler, j, srcParams));
+                "test", scheduler, j, 2, srcParams));
             storeBatch (*src, batch);
         }
 
@@ -63,7 +63,7 @@ public:
         {
             // Re-open the db
             std::unique_ptr <Database> src (manager->make_Database (
-                "test", scheduler, j, srcParams));
+                "test", scheduler, j, 2, srcParams));
 
             // Set up the destination database
             File const dest_db (File::createTempFile ("dest_db"));
@@ -72,7 +72,7 @@ public:
             destParams.set ("path", dest_db.getFullPathName ());
 
             std::unique_ptr <Database> dest (manager->make_Database (
-                "test", scheduler, j, destParams));
+                "test", scheduler, j, 2, destParams));
 
             beginTestCase (String ("import into '") + destBackendType +
                 "' from '" + srcBackendType + "'");
@@ -130,7 +130,7 @@ public:
         {
             // Open the database
             std::unique_ptr <Database> db (manager->make_Database ("test", scheduler,
-                j, nodeParams, tempParams));
+                j, 2, nodeParams, tempParams));
 
             // Write the batch
             storeBatch (*db, batch);
@@ -156,7 +156,7 @@ public:
         {
             // Re-open the database without the ephemeral DB
             std::unique_ptr <Database> db (manager->make_Database (
-                "test", scheduler, j, nodeParams));
+                "test", scheduler, j, 2, nodeParams));
 
             // Read it back in
             Batch copy;
@@ -172,7 +172,7 @@ public:
         {
             // Verify the ephemeral db
             std::unique_ptr <Database> db (manager->make_Database ("test",
-                scheduler, j, tempParams, StringPairArray ()));
+                scheduler, j, 2, tempParams, StringPairArray ()));
 
             // Read it back in
             Batch copy;
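
The getMissingNodes rework above is built around a simple calling pattern for the new Database::asyncFetch and Database::waitReads: try every key without blocking, remember the keys whose reads had to be scheduled, block once for the whole batch, then retry. The sketch below illustrates that pattern in isolation, assuming only the NodeStore interfaces declared in this patch; fetchAll, keys and the process callback are illustrative names, not code from the tree.

// Sketch: batch reads against the asynchronous NodeStore interface.
// Database, NodeObject and uint256 are the types from this patch;
// fetchAll and process are hypothetical.
#include <vector>

void fetchAll (NodeStore::Database& db, std::vector <uint256> keys,
               void (*process) (NodeObject::pointer const&))
{
    while (! keys.empty ())
    {
        std::vector <uint256> deferred;

        for (uint256 const& key : keys)
        {
            NodeObject::pointer obj;

            if (db.asyncFetch (key, obj))
            {
                // Answered from cache; obj may be null if the key is
                // known to be absent (the negative cache case).
                if (obj)
                    process (obj);
            }
            else
            {
                // A read was queued on the database's read threads.
                deferred.push_back (key);
            }
        }

        if (deferred.empty ())
            break;

        // One wait covers every read queued above; the retried keys
        // should then be answered from the positive or negative cache.
        db.waitReads ();
        keys.swap (deferred);
    }
}

This mirrors the structure of the new getMissingNodes loop, which counts deferred reads per pass and calls waitReads() once per pass instead of blocking on each node.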
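
The "negative cache" in DatabaseImp is a KeyCache keyed by hash that remembers recent definite misses, so a ledger-acquire pass that repeatedly asks for a node the store does not have stops paying for backend reads. The protocol has three touch points, sketched below; the free functions and the readBackend helper are illustrative, KeyCache is assumed to be instantiated on uint256, and the cache calls (touch_if_exists, insert, erase) are the ones the patch uses in fetch() and store().

// Sketch: how DatabaseImp's negative cache is consulted and maintained.
// negCache stands in for m_negCache; readBackend stands in for
// fetchInternal and is assumed, not shown.
NodeObject::pointer fetchWithNegativeCache (
    KeyCache <uint256>& negCache, Backend& backend, uint256 const& hash)
{
    // 1. A remembered miss answers the query without touching the backend.
    if (negCache.touch_if_exists (hash))
        return NodeObject::pointer ();

    NodeObject::pointer obj = readBackend (backend, hash);

    // 2. A fresh miss is remembered so the next lookup is cheap.
    if (! obj)
        negCache.insert (hash);

    return obj;
}

void storeWithNegativeCache (
    KeyCache <uint256>& negCache, Backend& backend,
    uint256 const& hash, NodeObject::pointer const& object)
{
    // 3. Storing under a hash invalidates any remembered miss for it.
    negCache.erase (hash);
    backend.store (object);
}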
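
The read-barrier logic is the subtlest part of DatabaseImp: waitReads() does not track individual keys, it waits until the generation counter has advanced by two (or the read set drains). A generation ends each time a read thread's key-ordered cursor wraps past the end of the set, so two wraps guarantee at least one complete pass over every key that was already queued when the wait began. The self-contained sketch below isolates just that handshake; the BatchReader class, its integer keys and its method names are illustrative, only the scheme (key-ordered cursor, generation bump on wrap, wait for gen + 2) mirrors the patch.

// Sketch: the two-generation wait used by DatabaseImp's read threads.
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <set>

class BatchReader
{
    std::mutex m_lock;
    std::condition_variable m_cond;     // wakes the read loop
    std::condition_variable m_genCond;  // wakes waiters
    std::set <std::uint64_t> m_set;     // keys queued for reading
    std::uint64_t m_last = 0;           // last key handed out
    std::uint64_t m_gen = 0;            // completed-pass counter
    bool m_shut = false;

public:
    void post (std::uint64_t key)       // like asyncFetch's miss path
    {
        std::lock_guard <std::mutex> lock (m_lock);
        if (m_set.insert (key).second)
            m_cond.notify_one ();
    }

    // Read-thread loop body: returns false at shutdown, otherwise hands
    // out the next queued key at or above the cursor, wrapping as needed.
    bool next (std::uint64_t& key)
    {
        std::unique_lock <std::mutex> lock (m_lock);

        while (! m_shut && m_set.empty ())
        {
            m_genCond.notify_all ();    // nothing pending: waiters may wake
            m_cond.wait (lock);
        }

        if (m_shut)
            return false;

        auto it = m_set.lower_bound (m_last);
        if (it == m_set.end ())
        {
            it = m_set.begin ();
            ++m_gen;                    // wrapped: one full pass finished
            m_genCond.notify_all ();
        }

        key = *it;
        m_set.erase (it);
        m_last = key;
        return true;
    }

    // Like waitReads(): block until every key queued before this call
    // has been handed out to some read thread.
    void wait ()
    {
        std::unique_lock <std::mutex> lock (m_lock);
        std::uint64_t const wakeGen = m_gen + 2;
        while (! m_shut && ! m_set.empty () && m_gen < wakeGen)
            m_genCond.wait (lock);
    }

    void shutdown ()
    {
        std::lock_guard <std::mutex> lock (m_lock);
        m_shut = true;
        m_cond.notify_all ();
        m_genCond.notify_all ();
    }
};

Reading keys in sorted order keeps the backend's access pattern closer to sequential, and the generation counter gives callers a cheap batch barrier without per-key bookkeeping.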