# List that collects the chapters of the current group
chapters = []
# Iterate over one group of files; `group` is a chunk of 3,000 filenames
# produced by an enclosing loop with index `i` (see the sketch after this block)
for filename in group:
    # Open and parse the JSON file for one article
    with open(os.path.join('./temp', filename), 'r') as f:
        data = json.load(f)
    # Create one chapter per article
    chapter = epub.EpubHtml(title=data['title'], file_name='chap_' + data['id'] + '.xhtml', lang='en')
    chapter.content = u'<h1>' + data['title'] + '</h1><p>' + data['text'] + '</p>'
    # Register the chapter with the book
    book.add_item(chapter)
    # Keep the chapter for the table of contents and the spine
    chapters.append(chapter)
# Define the book's table of contents
book.toc = (epub.Link(chapters[0].file_name, 'Introduction', 'intro'),
            (epub.Section('Simple English Wikipedia'), chapters))
# Add the NCX and navigation documents to the book
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
# Define the reading order (spine)
book.spine = ['nav'] + chapters
# Write to the output folder, creating it first if it doesn't exist
if not os.path.exists('./output'):
    os.makedirs('./output')
epub.write_epub(os.path.join('./output', 'simple-english-wikipedia-' + str(i) + '.epub'), book, {})
print('Finished creating ebook: simple-english-wikipedia-' + str(i) + '.epub')
# Print the file size
print('Ebook size: {} KB'.format(os.path.getsize(os.path.join('./output', 'simple-english-wikipedia-' + str(i) + '.epub')) / 1024))
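
# The fragment above assumes an enclosing loop that supplies `group` (one
# chunk of filenames), its index `i`, and a fresh `book` per chunk. A minimal
# sketch of that scaffolding, assuming the article JSON files live in ./temp;
# the chunk size of 3,000 comes from the comment above, and the metadata
# values are illustrative, not from the original file:
import json
import os

from ebooklib import epub

filenames = sorted(os.listdir('./temp'))
GROUP_SIZE = 3000
for i, start in enumerate(range(0, len(filenames), GROUP_SIZE)):
    group = filenames[start:start + GROUP_SIZE]
    book = epub.EpubBook()
    book.set_identifier('simple-english-wikipedia-' + str(i))
    book.set_title('Simple English Wikipedia, Part ' + str(i))
    book.set_language('en')
    # ... the chapter-building and writing code above runs here for each group ...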

# ----------------------------------------------------------------------
# Second file: PyTorch implementation of Implicit Q-Learning (IQL)
# ----------------------------------------------------------------------
import copy |
import torch |
import torch.nn as nn |
import torch.nn.functional as F |
from torch.optim.lr_scheduler import CosineAnnealingLR |
import wandb |
from IPython import embed |
from util import DEFAULT_DEVICE, compute_batched, update_exponential_moving_average |
EXP_ADV_MAX = 100. |

def asymmetric_l2_loss(u, tau):
    # Expectile regression loss from "Offline Reinforcement Learning with
    # Implicit Q-Learning" (Kostrikov et al.): each residual u is weighted by
    # |tau - 1(u < 0)|, so positive errors dominate when tau > 0.5 and the
    # fitted V is pushed toward an upper expectile of Q
    return torch.mean(torch.abs(tau - (u < 0).float()) * u**2)
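
# Quick sanity check of the asymmetry (illustrative values, not from the
# original file): with tau = 0.7, a positive residual is weighted 0.7 and a
# negative one 0.3, so minimizing this loss fits the 0.7-expectile:
#
#   >>> asymmetric_l2_loss(torch.tensor([1.0, -1.0]), 0.7)
#   tensor(0.5000)  # mean(0.7 * 1**2 + 0.3 * (-1)**2)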

class IQL(nn.Module):
    def __init__(self, qf, vf, policy, max_steps,
                 tau, alpha, value_lr=1e-4, policy_lr=1e-4, discount=0.99, beta=0.005):
        super().__init__()
        self.qf = qf.to(DEFAULT_DEVICE)
        # Frozen target Q network, tracked as an exponential moving average of qf
        self.q_target = copy.deepcopy(qf).requires_grad_(False).to(DEFAULT_DEVICE)
        self.vf = vf.to(DEFAULT_DEVICE)
        self.policy = policy.to(DEFAULT_DEVICE)
        self.v_optimizer = torch.optim.Adam(self.vf.parameters(), lr=value_lr)
        self.q_optimizer = torch.optim.Adam(self.qf.parameters(), lr=value_lr)
        self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=policy_lr)
        self.policy_lr_schedule = CosineAnnealingLR(self.policy_optimizer, max_steps)
        self.tau = tau            # expectile used by the value loss
        self.alpha = alpha
        self.discount = discount
        self.beta = beta          # presumably the EMA rate for q_target, given the 0.005 default
        self.step = 0
        self.pretrain_step = 0
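
    # Expected interfaces, noted here for readers (inferred from usage, not
    # stated in the original file): `qf(obs, act)` returns a Q estimate and
    # `qf.both(obs, act)` returns both twin-critic heads (used in iql_update
    # below); `vf(obs)` returns a scalar state value; `policy(obs)` is assumed
    # to return an action distribution (or action) for the policy update.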
    def iql_update(self, observations, actions, next_observations, rewards, terminals):
        # Compute the targets without tracking gradients: neither the target Q
        # network nor the bootstrapped next-state value is updated here
        with torch.no_grad():
            target_q = self.q_target(observations, actions)
            next_v = self.vf(next_observations)

        # Update the value function toward an upper expectile of target Q
        # (v must be computed outside no_grad so v_loss can backpropagate)
        v = self.vf(observations)
        adv = target_q - v
        v_loss = asymmetric_l2_loss(adv, self.tau)
        self.v_optimizer.zero_grad(set_to_none=True)
        v_loss.backward()
        self.v_optimizer.step()

        # Update Q function against the one-step TD target
        targets = rewards + (1. - terminals.float()) * self.discount * next_v
        qs = self.qf.both(observations, actions)
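        # The file is truncated at this point; the remainder below is a sketch
        # that follows the reference IQL implementation (Kostrikov et al.;
        # gwthomas/IQL-PyTorch), adapted to this class's names. The roles of
        # self.beta (EMA rate) and self.alpha (advantage temperature) are
        # inferred from the defaults above, not confirmed by the original file.
        q_loss = sum(F.mse_loss(q, targets) for q in qs) / len(qs)
        self.q_optimizer.zero_grad(set_to_none=True)
        q_loss.backward()
        self.q_optimizer.step()

        # Polyak-average the online critic into the frozen target critic
        update_exponential_moving_average(self.q_target, self.qf, self.beta)

        # Update the policy by advantage-weighted regression (AWR)
        exp_adv = torch.exp(self.alpha * adv.detach()).clamp(max=EXP_ADV_MAX)
        policy_out = self.policy(observations)
        if isinstance(policy_out, torch.distributions.Distribution):
            bc_losses = -policy_out.log_prob(actions)
        else:  # deterministic policy: advantage-weighted behavioral cloning
            bc_losses = torch.sum((policy_out - actions) ** 2, dim=1)
        policy_loss = torch.mean(exp_adv * bc_losses)
        self.policy_optimizer.zero_grad(set_to_none=True)
        policy_loss.backward()
        self.policy_optimizer.step()
        self.policy_lr_schedule.step()
        self.step += 1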